Diffstat (limited to 'lib')
 -rw-r--r--  lib/Kconfig.debug      |  14
 -rw-r--r--  lib/Makefile           |   3
 -rw-r--r--  lib/bucket_locks.c     |  54
 -rw-r--r--  lib/error-inject.c     | 242
 -rw-r--r--  lib/rhashtable.c       | 160
 -rw-r--r--  lib/test_bpf.c         | 112
 -rw-r--r--  lib/test_rhashtable.c  |   6
 7 files changed, 525 insertions(+), 66 deletions(-)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 2a1f28c183d3..64d7c19d3167 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1500,6 +1500,10 @@ config FAULT_INJECTION
Provide fault-injection framework.
For more details, see Documentation/fault-injection/.
+config FUNCTION_ERROR_INJECTION
+ def_bool y
+ depends on HAVE_FUNCTION_ERROR_INJECTION && KPROBES
+
config FAILSLAB
bool "Fault-injection capability for kmalloc"
depends on FAULT_INJECTION
@@ -1547,6 +1551,16 @@ config FAIL_FUTEX
help
Provide fault-injection capability for futexes.
+config FAIL_FUNCTION
+ bool "Fault-injection capability for functions"
+ depends on FAULT_INJECTION_DEBUG_FS && FUNCTION_ERROR_INJECTION
+ help
+ Provide function-based fault-injection capability.
+ This will allow you to override a specific function so that it
+ returns a given value. As a result, the caller of the function
+ will see an error value and has to handle it. This is useful
+ for testing the error handling in various subsystems.
+
config FAULT_INJECTION_DEBUG_FS
bool "Debugfs entries for fault-injection capabilities"
depends on FAULT_INJECTION && SYSFS && DEBUG_FS
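
The FAIL_FUNCTION capability described above only acts on functions that have been explicitly whitelisted for error injection. Below is a minimal sketch of such an annotation; it assumes the ALLOW_ERROR_INJECTION() macro from the companion error-injection header added elsewhere in this series, and the function itself is purely illustrative.

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/error-injection.h>	/* assumed companion header from this series */

/* Hypothetical helper whose error path we want to be able to exercise. */
static int example_prepare_buffer(void *buf, size_t len)
{
	if (!buf || !len)
		return -EINVAL;
	/* real setup work would go here */
	return 0;
}

/* Whitelist the function; only errno-style values may be injected. */
ALLOW_ERROR_INJECTION(example_prepare_buffer, ERRNO);

With CONFIG_FAIL_FUNCTION enabled, the fault-injection debugfs interface can then force example_prepare_buffer() to return an error, so its callers' error handling is exercised without provoking a real failure.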
diff --git a/lib/Makefile b/lib/Makefile
index 749851abe85a..7adb066692b3 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -39,7 +39,7 @@ obj-y += bcd.o div64.o sort.o parser.o debug_locks.o random32.o \
gcd.o lcm.o list_sort.o uuid.o flex_array.o iov_iter.o clz_ctz.o \
bsearch.o find_bit.o llist.o memweight.o kfifo.o \
percpu-refcount.o percpu_ida.o rhashtable.o reciprocal_div.o \
- once.o refcount.o usercopy.o errseq.o
+ once.o refcount.o usercopy.o errseq.o bucket_locks.o
obj-$(CONFIG_STRING_SELFTEST) += test_string.o
obj-y += string_helpers.o
obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o
@@ -149,6 +149,7 @@ obj-$(CONFIG_NETDEV_NOTIFIER_ERROR_INJECT) += netdev-notifier-error-inject.o
obj-$(CONFIG_MEMORY_NOTIFIER_ERROR_INJECT) += memory-notifier-error-inject.o
obj-$(CONFIG_OF_RECONFIG_NOTIFIER_ERROR_INJECT) += \
of-reconfig-notifier-error-inject.o
+obj-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o
lib-$(CONFIG_GENERIC_BUG) += bug.o
diff --git a/lib/bucket_locks.c b/lib/bucket_locks.c
new file mode 100644
index 000000000000..266a97c5708b
--- /dev/null
+++ b/lib/bucket_locks.c
@@ -0,0 +1,54 @@
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+/* Allocate an array of spinlocks to be accessed by a hash. Two arguments
+ * control the number of elements allocated in the array: max_size gives
+ * the maximum number of locks to allocate and cpu_mult gives the number
+ * of locks to allocate per CPU (zero means allocate max_size locks).
+ * The number of locks allocated is returned to the caller as a mask in
+ * *locks_mask, so it is expected to be a power of two, as for a hash table.
+ */
+
+int alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *locks_mask,
+ size_t max_size, unsigned int cpu_mult, gfp_t gfp)
+{
+ spinlock_t *tlocks = NULL;
+ unsigned int i, size;
+#if defined(CONFIG_PROVE_LOCKING)
+ unsigned int nr_pcpus = 2;
+#else
+ unsigned int nr_pcpus = num_possible_cpus();
+#endif
+
+ if (cpu_mult) {
+ nr_pcpus = min_t(unsigned int, nr_pcpus, 64UL);
+ size = min_t(unsigned int, nr_pcpus * cpu_mult, max_size);
+ } else {
+ size = max_size;
+ }
+
+ if (sizeof(spinlock_t) != 0) {
+ if (gfpflags_allow_blocking(gfp))
+ tlocks = kvmalloc(size * sizeof(spinlock_t), gfp);
+ else
+ tlocks = kmalloc_array(size, sizeof(spinlock_t), gfp);
+ if (!tlocks)
+ return -ENOMEM;
+ for (i = 0; i < size; i++)
+ spin_lock_init(&tlocks[i]);
+ }
+
+ *locks = tlocks;
+ *locks_mask = size - 1;
+
+ return 0;
+}
+EXPORT_SYMBOL(alloc_bucket_spinlocks);
+
+void free_bucket_spinlocks(spinlock_t *locks)
+{
+ kvfree(locks);
+}
+EXPORT_SYMBOL(free_bucket_spinlocks);
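
To show how the new helpers are meant to be consumed, here is a minimal usage sketch (not part of this patch). struct example_table and its functions are assumptions; the declarations for alloc_bucket_spinlocks()/free_bucket_spinlocks() are taken to come from the spinlock header updated elsewhere in this series.

#include <linux/gfp.h>
#include <linux/jhash.h>
#include <linux/spinlock.h>

struct example_table {
	spinlock_t *bucket_locks;
	unsigned int locks_mask;
};

static int example_table_init(struct example_table *t, gfp_t gfp)
{
	/* At most 128 locks in total, 4 locks per possible CPU. */
	return alloc_bucket_spinlocks(&t->bucket_locks, &t->locks_mask,
				      128, 4, gfp);
}

static spinlock_t *example_bucket_lock(struct example_table *t, u32 key)
{
	/* Hash the key and mask it into the lock array. */
	return &t->bucket_locks[jhash_1word(key, 0) & t->locks_mask];
}

static void example_table_destroy(struct example_table *t)
{
	free_bucket_spinlocks(t->bucket_locks);
}

The rhashtable conversion below follows exactly this pattern, passing tbl->size >> 1 as max_size and ht->p.locks_mul as cpu_mult.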
diff --git a/lib/error-inject.c b/lib/error-inject.c
new file mode 100644
index 000000000000..c0d4600f4896
--- /dev/null
+++ b/lib/error-inject.c
@@ -0,0 +1,242 @@
+// SPDX-License-Identifier: GPL-2.0
+// error-inject.c: Function-level error injection table
+#include <linux/error-injection.h>
+#include <linux/debugfs.h>
+#include <linux/kallsyms.h>
+#include <linux/kprobes.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+
+/* Whitelist of symbols that can be overridden for error injection. */
+static LIST_HEAD(error_injection_list);
+static DEFINE_MUTEX(ei_mutex);
+struct ei_entry {
+ struct list_head list;
+ unsigned long start_addr;
+ unsigned long end_addr;
+ int etype;
+ void *priv;
+};
+
+bool within_error_injection_list(unsigned long addr)
+{
+ struct ei_entry *ent;
+ bool ret = false;
+
+ mutex_lock(&ei_mutex);
+ list_for_each_entry(ent, &error_injection_list, list) {
+ if (addr >= ent->start_addr && addr < ent->end_addr) {
+ ret = true;
+ break;
+ }
+ }
+ mutex_unlock(&ei_mutex);
+ return ret;
+}
+
+int get_injectable_error_type(unsigned long addr)
+{
+ struct ei_entry *ent;
+
+ list_for_each_entry(ent, &error_injection_list, list) {
+ if (addr >= ent->start_addr && addr < ent->end_addr)
+ return ent->etype;
+ }
+ return EI_ETYPE_NONE;
+}
+
+/*
+ * Lookup and populate the error_injection_list.
+ *
+ * For safety reasons we only allow certain functions to be overridden with
+ * error injection, so we need to populate the list of the symbols that have
+ * been marked as safe for overriding.
+ */
+static void populate_error_injection_list(struct error_injection_entry *start,
+ struct error_injection_entry *end,
+ void *priv)
+{
+ struct error_injection_entry *iter;
+ struct ei_entry *ent;
+ unsigned long entry, offset = 0, size = 0;
+
+ mutex_lock(&ei_mutex);
+ for (iter = start; iter < end; iter++) {
+ entry = arch_deref_entry_point((void *)iter->addr);
+
+ if (!kernel_text_address(entry) ||
+ !kallsyms_lookup_size_offset(entry, &size, &offset)) {
+ pr_err("Failed to find error inject entry at %p\n",
+ (void *)entry);
+ continue;
+ }
+
+ ent = kmalloc(sizeof(*ent), GFP_KERNEL);
+ if (!ent)
+ break;
+ ent->start_addr = entry;
+ ent->end_addr = entry + size;
+ ent->etype = iter->etype;
+ ent->priv = priv;
+ INIT_LIST_HEAD(&ent->list);
+ list_add_tail(&ent->list, &error_injection_list);
+ }
+ mutex_unlock(&ei_mutex);
+}
+
+/* Markers of the _error_injection_whitelist section */
+extern struct error_injection_entry __start_error_injection_whitelist[];
+extern struct error_injection_entry __stop_error_injection_whitelist[];
+
+static void __init populate_kernel_ei_list(void)
+{
+ populate_error_injection_list(__start_error_injection_whitelist,
+ __stop_error_injection_whitelist,
+ NULL);
+}
+
+#ifdef CONFIG_MODULES
+static void module_load_ei_list(struct module *mod)
+{
+ if (!mod->num_ei_funcs)
+ return;
+
+ populate_error_injection_list(mod->ei_funcs,
+ mod->ei_funcs + mod->num_ei_funcs, mod);
+}
+
+static void module_unload_ei_list(struct module *mod)
+{
+ struct ei_entry *ent, *n;
+
+ if (!mod->num_ei_funcs)
+ return;
+
+ mutex_lock(&ei_mutex);
+ list_for_each_entry_safe(ent, n, &error_injection_list, list) {
+ if (ent->priv == mod) {
+ list_del_init(&ent->list);
+ kfree(ent);
+ }
+ }
+ mutex_unlock(&ei_mutex);
+}
+
+/* Module notifier callback, checking the error injection table of the module */
+static int ei_module_callback(struct notifier_block *nb,
+ unsigned long val, void *data)
+{
+ struct module *mod = data;
+
+ if (val == MODULE_STATE_COMING)
+ module_load_ei_list(mod);
+ else if (val == MODULE_STATE_GOING)
+ module_unload_ei_list(mod);
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block ei_module_nb = {
+ .notifier_call = ei_module_callback,
+ .priority = 0
+};
+
+static __init int module_ei_init(void)
+{
+ return register_module_notifier(&ei_module_nb);
+}
+#else /* !CONFIG_MODULES */
+#define module_ei_init() (0)
+#endif
+
+/*
+ * error_injection/whitelist -- shows which functions can be overridden for
+ * error injection.
+ */
+static void *ei_seq_start(struct seq_file *m, loff_t *pos)
+{
+ mutex_lock(&ei_mutex);
+ return seq_list_start(&error_injection_list, *pos);
+}
+
+static void ei_seq_stop(struct seq_file *m, void *v)
+{
+ mutex_unlock(&ei_mutex);
+}
+
+static void *ei_seq_next(struct seq_file *m, void *v, loff_t *pos)
+{
+ return seq_list_next(v, &error_injection_list, pos);
+}
+
+static const char *error_type_string(int etype)
+{
+ switch (etype) {
+ case EI_ETYPE_NULL:
+ return "NULL";
+ case EI_ETYPE_ERRNO:
+ return "ERRNO";
+ case EI_ETYPE_ERRNO_NULL:
+ return "ERRNO_NULL";
+ default:
+ return "(unknown)";
+ }
+}
+
+static int ei_seq_show(struct seq_file *m, void *v)
+{
+ struct ei_entry *ent = list_entry(v, struct ei_entry, list);
+
+ seq_printf(m, "%pf\t%s\n", (void *)ent->start_addr,
+ error_type_string(ent->etype));
+ return 0;
+}
+
+static const struct seq_operations ei_seq_ops = {
+ .start = ei_seq_start,
+ .next = ei_seq_next,
+ .stop = ei_seq_stop,
+ .show = ei_seq_show,
+};
+
+static int ei_open(struct inode *inode, struct file *filp)
+{
+ return seq_open(filp, &ei_seq_ops);
+}
+
+static const struct file_operations debugfs_ei_ops = {
+ .open = ei_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+static int __init ei_debugfs_init(void)
+{
+ struct dentry *dir, *file;
+
+ dir = debugfs_create_dir("error_injection", NULL);
+ if (!dir)
+ return -ENOMEM;
+
+ file = debugfs_create_file("list", 0444, dir, NULL, &debugfs_ei_ops);
+ if (!file) {
+ debugfs_remove(dir);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int __init init_error_injection(void)
+{
+ populate_kernel_ei_list();
+
+ if (!module_ei_init())
+ ei_debugfs_init();
+
+ return 0;
+}
+late_initcall(init_error_injection);
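
The two exported lookup helpers above are what an injection mechanism (for example a kprobe-based one) is expected to consult when setting up an override. The policy check below is only a sketch under that assumption: within_error_injection_list(), get_injectable_error_type() and the EI_ETYPE_* constants come from the code above, everything else is illustrative.

#include <linux/types.h>
#include <linux/err.h>			/* MAX_ERRNO */
#include <linux/error-injection.h>

/* Decide whether a given return value may be injected at target_addr. */
static bool example_may_inject(unsigned long target_addr, long retval)
{
	if (!within_error_injection_list(target_addr))
		return false;			/* not whitelisted: never override */

	switch (get_injectable_error_type(target_addr)) {
	case EI_ETYPE_NULL:			/* only NULL may be injected */
		return retval == 0;
	case EI_ETYPE_ERRNO:			/* only -MAX_ERRNO..-1 */
		return retval < 0 && retval >= -MAX_ERRNO;
	case EI_ETYPE_ERRNO_NULL:		/* NULL or an errno */
		return retval == 0 || (retval < 0 && retval >= -MAX_ERRNO);
	default:				/* EI_ETYPE_NONE or unknown */
		return false;
	}
}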
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index ddd7dde87c3c..3825c30aaa36 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -65,42 +65,6 @@ EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
#define ASSERT_RHT_MUTEX(HT)
#endif
-
-static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl,
- gfp_t gfp)
-{
- unsigned int i, size;
-#if defined(CONFIG_PROVE_LOCKING)
- unsigned int nr_pcpus = 2;
-#else
- unsigned int nr_pcpus = num_possible_cpus();
-#endif
-
- nr_pcpus = min_t(unsigned int, nr_pcpus, 64UL);
- size = roundup_pow_of_two(nr_pcpus * ht->p.locks_mul);
-
- /* Never allocate more than 0.5 locks per bucket */
- size = min_t(unsigned int, size, tbl->size >> 1);
-
- if (tbl->nest)
- size = min(size, 1U << tbl->nest);
-
- if (sizeof(spinlock_t) != 0) {
- if (gfpflags_allow_blocking(gfp))
- tbl->locks = kvmalloc(size * sizeof(spinlock_t), gfp);
- else
- tbl->locks = kmalloc_array(size, sizeof(spinlock_t),
- gfp);
- if (!tbl->locks)
- return -ENOMEM;
- for (i = 0; i < size; i++)
- spin_lock_init(&tbl->locks[i]);
- }
- tbl->locks_mask = size - 1;
-
- return 0;
-}
-
static void nested_table_free(union nested_table *ntbl, unsigned int size)
{
const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
@@ -140,7 +104,7 @@ static void bucket_table_free(const struct bucket_table *tbl)
if (tbl->nest)
nested_bucket_table_free(tbl);
- kvfree(tbl->locks);
+ free_bucket_spinlocks(tbl->locks);
kvfree(tbl);
}
@@ -207,7 +171,7 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
gfp_t gfp)
{
struct bucket_table *tbl = NULL;
- size_t size;
+ size_t size, max_locks;
int i;
size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
@@ -227,7 +191,12 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
tbl->size = size;
- if (alloc_bucket_locks(ht, tbl, gfp) < 0) {
+ max_locks = size >> 1;
+ if (tbl->nest)
+ max_locks = min_t(size_t, max_locks, 1U << tbl->nest);
+
+ if (alloc_bucket_spinlocks(&tbl->locks, &tbl->locks_mask, max_locks,
+ ht->p.locks_mul, gfp) < 0) {
bucket_table_free(tbl);
return NULL;
}
@@ -707,6 +676,7 @@ void rhashtable_walk_enter(struct rhashtable *ht, struct rhashtable_iter *iter)
iter->p = NULL;
iter->slot = 0;
iter->skip = 0;
+ iter->end_of_table = 0;
spin_lock(&ht->lock);
iter->walker.tbl =
@@ -732,7 +702,7 @@ void rhashtable_walk_exit(struct rhashtable_iter *iter)
EXPORT_SYMBOL_GPL(rhashtable_walk_exit);
/**
- * rhashtable_walk_start - Start a hash table walk
+ * rhashtable_walk_start_check - Start a hash table walk
* @iter: Hash table iterator
*
* Start a hash table walk at the current iterator position. Note that we take
@@ -744,8 +714,12 @@ EXPORT_SYMBOL_GPL(rhashtable_walk_exit);
* Returns -EAGAIN if resize event occurred. Note that the iterator
* will rewind back to the beginning and you may use it immediately
* by calling rhashtable_walk_next.
+ *
+ * rhashtable_walk_start is defined as an inline variant that returns
+ * void. This is preferred in cases where the caller would ignore
+ * resize events and always continue.
*/
-int rhashtable_walk_start(struct rhashtable_iter *iter)
+int rhashtable_walk_start_check(struct rhashtable_iter *iter)
__acquires(RCU)
{
struct rhashtable *ht = iter->ht;
@@ -757,28 +731,26 @@ int rhashtable_walk_start(struct rhashtable_iter *iter)
list_del(&iter->walker.list);
spin_unlock(&ht->lock);
- if (!iter->walker.tbl) {
+ if (!iter->walker.tbl && !iter->end_of_table) {
iter->walker.tbl = rht_dereference_rcu(ht->tbl, ht);
return -EAGAIN;
}
return 0;
}
-EXPORT_SYMBOL_GPL(rhashtable_walk_start);
+EXPORT_SYMBOL_GPL(rhashtable_walk_start_check);
/**
- * rhashtable_walk_next - Return the next object and advance the iterator
- * @iter: Hash table iterator
+ * __rhashtable_walk_find_next - Find the next element in a table (or the first
+ * one in case of a new walk).
*
- * Note that you must call rhashtable_walk_stop when you are finished
- * with the walk.
+ * @iter: Hash table iterator
*
- * Returns the next object or NULL when the end of the table is reached.
+ * Returns the found object or NULL when the end of the table is reached.
*
- * Returns -EAGAIN if resize event occured. Note that the iterator
- * will rewind back to the beginning and you may continue to use it.
+ * Returns -EAGAIN if resize event occurred.
*/
-void *rhashtable_walk_next(struct rhashtable_iter *iter)
+static void *__rhashtable_walk_find_next(struct rhashtable_iter *iter)
{
struct bucket_table *tbl = iter->walker.tbl;
struct rhlist_head *list = iter->list;
@@ -786,13 +758,8 @@ void *rhashtable_walk_next(struct rhashtable_iter *iter)
struct rhash_head *p = iter->p;
bool rhlist = ht->rhlist;
- if (p) {
- if (!rhlist || !(list = rcu_dereference(list->next))) {
- p = rcu_dereference(p->next);
- list = container_of(p, struct rhlist_head, rhead);
- }
- goto next;
- }
+ if (!tbl)
+ return NULL;
for (; iter->slot < tbl->size; iter->slot++) {
int skip = iter->skip;
@@ -836,13 +803,90 @@ next:
iter->slot = 0;
iter->skip = 0;
return ERR_PTR(-EAGAIN);
+ } else {
+ iter->end_of_table = true;
}
return NULL;
}
+
+/**
+ * rhashtable_walk_next - Return the next object and advance the iterator
+ * @iter: Hash table iterator
+ *
+ * Note that you must call rhashtable_walk_stop when you are finished
+ * with the walk.
+ *
+ * Returns the next object or NULL when the end of the table is reached.
+ *
+ * Returns -EAGAIN if resize event occurred. Note that the iterator
+ * will rewind back to the beginning and you may continue to use it.
+ */
+void *rhashtable_walk_next(struct rhashtable_iter *iter)
+{
+ struct rhlist_head *list = iter->list;
+ struct rhashtable *ht = iter->ht;
+ struct rhash_head *p = iter->p;
+ bool rhlist = ht->rhlist;
+
+ if (p) {
+ if (!rhlist || !(list = rcu_dereference(list->next))) {
+ p = rcu_dereference(p->next);
+ list = container_of(p, struct rhlist_head, rhead);
+ }
+ if (!rht_is_a_nulls(p)) {
+ iter->skip++;
+ iter->p = p;
+ iter->list = list;
+ return rht_obj(ht, rhlist ? &list->rhead : p);
+ }
+
+ /* At the end of this slot, switch to next one and then find
+ * next entry from that point.
+ */
+ iter->skip = 0;
+ iter->slot++;
+ }
+
+ return __rhashtable_walk_find_next(iter);
+}
EXPORT_SYMBOL_GPL(rhashtable_walk_next);
/**
+ * rhashtable_walk_peek - Return the next object but don't advance the iterator
+ * @iter: Hash table iterator
+ *
+ * Returns the next object or NULL when the end of the table is reached.
+ *
+ * Returns -EAGAIN if resize event occurred. Note that the iterator
+ * will rewind back to the beginning and you may continue to use it.
+ */
+void *rhashtable_walk_peek(struct rhashtable_iter *iter)
+{
+ struct rhlist_head *list = iter->list;
+ struct rhashtable *ht = iter->ht;
+ struct rhash_head *p = iter->p;
+
+ if (p)
+ return rht_obj(ht, ht->rhlist ? &list->rhead : p);
+
+ /* No object found in current iter, find next one in the table. */
+
+ if (iter->skip) {
+ /* A nonzero skip value points to the next entry in the table
+ * beyond the last one that was found. Decrement skip so
+ * we find the current value. __rhashtable_walk_find_next
+ * will restore the original value of skip assuming that
+ * the table hasn't changed.
+ */
+ iter->skip--;
+ }
+
+ return __rhashtable_walk_find_next(iter);
+}
+EXPORT_SYMBOL_GPL(rhashtable_walk_peek);
+
+/**
* rhashtable_walk_stop - Finish a hash table walk
* @iter: Hash table iterator
*
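
For context, the reworked walker is used as in the sketch below; struct example_obj and example_dump() are illustrative only. rhashtable_walk_start() is now the void inline variant referred to above, rhashtable_walk_start_check() keeps the old -EAGAIN reporting, and rhashtable_walk_peek() returns the next object without advancing the iterator.

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/rhashtable.h>

struct example_obj {
	u32 key;
	struct rhash_head node;
};

static void example_dump(struct rhashtable *ht)
{
	struct rhashtable_iter iter;
	struct example_obj *obj;

	rhashtable_walk_enter(ht, &iter);
	rhashtable_walk_start(&iter);	/* void variant: resize events are tolerated */

	while ((obj = rhashtable_walk_next(&iter))) {
		if (IS_ERR(obj)) {
			if (PTR_ERR(obj) == -EAGAIN)
				continue;	/* table resized, iterator rewound */
			break;
		}
		pr_info("key %u\n", obj->key);
	}

	rhashtable_walk_stop(&iter);
	rhashtable_walk_exit(&iter);
}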
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index f369889e521d..4cd9ea9b3449 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -2003,10 +2003,14 @@ static struct bpf_test tests[] = {
{ { 4, 0 }, { 5, 10 } }
},
{
- "INT: DIV by zero",
+ /* This one doesn't go through the verifier, but is executed as raw
+ * eBPF insns, unlike the cBPF tests in this file. Div-by-zero tests
+ * are therefore done in test_verifier in the BPF kselftests.
+ */
+ "INT: DIV by -1",
.u.insns_int = {
BPF_ALU64_REG(BPF_MOV, R6, R1),
- BPF_ALU64_IMM(BPF_MOV, R7, 0),
+ BPF_ALU64_IMM(BPF_MOV, R7, -1),
BPF_LD_ABS(BPF_B, 3),
BPF_ALU32_REG(BPF_DIV, R0, R7),
BPF_EXIT_INSN(),
@@ -6109,6 +6113,110 @@ static struct bpf_test tests[] = {
{ { ETH_HLEN, 42 } },
.fill_helper = bpf_fill_ld_abs_vlan_push_pop2,
},
+ /* Checking interpreter vs JIT wrt signed extended imms. */
+ {
+ "JNE signed compare, test 1",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R1, 0xfefbbc12),
+ BPF_ALU32_IMM(BPF_MOV, R3, 0xffff0000),
+ BPF_MOV64_REG(R2, R1),
+ BPF_ALU64_REG(BPF_AND, R2, R3),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_JMP_IMM(BPF_JNE, R2, -17104896, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 2),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "JNE signed compare, test 2",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R1, 0xfefbbc12),
+ BPF_ALU32_IMM(BPF_MOV, R3, 0xffff0000),
+ BPF_MOV64_REG(R2, R1),
+ BPF_ALU64_REG(BPF_AND, R2, R3),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_JMP_IMM(BPF_JNE, R2, 0xfefb0000, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 2),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "JNE signed compare, test 3",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R1, 0xfefbbc12),
+ BPF_ALU32_IMM(BPF_MOV, R3, 0xffff0000),
+ BPF_ALU32_IMM(BPF_MOV, R4, 0xfefb0000),
+ BPF_MOV64_REG(R2, R1),
+ BPF_ALU64_REG(BPF_AND, R2, R3),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_JMP_REG(BPF_JNE, R2, R4, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 2),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 2 } },
+ },
+ {
+ "JNE signed compare, test 4",
+ .u.insns_int = {
+ BPF_LD_IMM64(R1, -17104896),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_JMP_IMM(BPF_JNE, R1, -17104896, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 2),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 2 } },
+ },
+ {
+ "JNE signed compare, test 5",
+ .u.insns_int = {
+ BPF_LD_IMM64(R1, 0xfefb0000),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_JMP_IMM(BPF_JNE, R1, 0xfefb0000, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 2),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "JNE signed compare, test 6",
+ .u.insns_int = {
+ BPF_LD_IMM64(R1, 0x7efb0000),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_JMP_IMM(BPF_JNE, R1, 0x7efb0000, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 2),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 2 } },
+ },
+ {
+ "JNE signed compare, test 7",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_IMM, 0xffff0000),
+ BPF_STMT(BPF_MISC | BPF_TAX, 0),
+ BPF_STMT(BPF_LD | BPF_IMM, 0xfefbbc12),
+ BPF_STMT(BPF_ALU | BPF_AND | BPF_X, 0),
+ BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0xfefb0000, 1, 0),
+ BPF_STMT(BPF_RET | BPF_K, 1),
+ BPF_STMT(BPF_RET | BPF_K, 2),
+ },
+ CLASSIC | FLAG_NO_DATA,
+ {},
+ { { 0, 2 } },
+ },
};
static struct net_device dev;
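
The "JNE signed compare" cases above pin down how a 32-bit immediate is widened, so that the interpreter and the JITs agree: BPF_JMP comparisons sign-extend the s32 immediate to 64 bits, while a value built with 32-bit ALU ops or loaded with LD_IMM64 keeps its upper half as written. The userspace C snippet below (an illustration, not part of the test suite) works through the two values that "test 5" ends up comparing.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* BPF_LD_IMM64(R1, 0xfefb0000) loads the full 64-bit constant. */
	uint64_t reg = UINT64_C(0xfefb0000);	/* 0x00000000fefb0000 */

	/* BPF_JMP_IMM(BPF_JNE, R1, 0xfefb0000, 1) sign-extends its s32 imm. */
	int64_t imm = (int32_t)0xfefb0000;	/* 0xfffffffffefb0000 */

	/* The values differ, the JNE is taken and the test returns 1. */
	printf("reg=%#llx imm=%#llx equal=%d\n",
	       (unsigned long long)reg, (unsigned long long)imm,
	       reg == (uint64_t)imm);
	return 0;
}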
diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c
index 8e83cbdc049c..76d3667fdea2 100644
--- a/lib/test_rhashtable.c
+++ b/lib/test_rhashtable.c
@@ -162,11 +162,7 @@ static void test_bucket_stats(struct rhashtable *ht, unsigned int entries)
return;
}
- err = rhashtable_walk_start(&hti);
- if (err && err != -EAGAIN) {
- pr_warn("Test failed: iterator failed: %d\n", err);
- return;
- }
+ rhashtable_walk_start(&hti);
while ((pos = rhashtable_walk_next(&hti))) {
if (PTR_ERR(pos) == -EAGAIN) {