author    Linus Torvalds <torvalds@linux-foundation.org>  2018-02-01 10:31:17 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>  2018-02-01 10:31:17 -0800
commit    f6cff79f1d122f78a4b35bf4b2f0112afcd89ea4 (patch)
tree      cf3a38576f9adbb3860982c25f72aebed2bb541a /drivers
parent    47fcc0360cfb3fe82e4daddacad3c1cd80b0b75d (diff)
parent    9ff6576e124b1227c27c1da43fe5f8ee908263e0 (diff)
Merge tag 'char-misc-4.16-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc
Pull char/misc driver updates from Greg KH:
 "Here is the big pull request for char/misc drivers for 4.16-rc1.

  There's a lot of stuff in here. Three new driver subsystems were
  added for various types of hardware busses:

   - siox
   - slimbus
   - soundwire

  as well as a new vboxguest subsystem for the VirtualBox hypervisor
  drivers.

  There's also big updates from the FPGA subsystem, lots of Android
  binder fixes, the usual handful of hyper-v updates, and lots of
  other smaller driver updates.

  All of these have been in linux-next for a long time, with no
  reported issues"

* tag 'char-misc-4.16-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc: (155 commits)
  char: lp: use true or false for boolean values
  android: binder: use VM_ALLOC to get vm area
  android: binder: Use true and false for boolean values
  lkdtm: fix handle_irq_event symbol for INT_HW_IRQ_EN
  EISA: Delete error message for a failed memory allocation in eisa_probe()
  EISA: Whitespace cleanup
  misc: remove AVR32 dependencies
  virt: vbox: Add error mapping for VERR_INVALID_NAME and VERR_NO_MORE_FILES
  soundwire: Fix a signedness bug
  uio_hv_generic: fix new type mismatch warnings
  uio_hv_generic: fix type mismatch warnings
  auxdisplay: img-ascii-lcd: add missing MODULE_DESCRIPTION/AUTHOR/LICENSE
  uio_hv_generic: add rescind support
  uio_hv_generic: check that host supports monitor page
  uio_hv_generic: create send and receive buffers
  uio: document uio_hv_generic regions
  doc: fix documentation about uio_hv_generic
  vmbus: add monitor_id and subchannel_id to sysfs per channel
  vmbus: fix ABI documentation
  uio_hv_generic: use ISR callback method
  ...
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/Kconfig | 6
-rw-r--r--  drivers/Makefile | 3
-rw-r--r--  drivers/android/binder.c | 196
-rw-r--r--  drivers/android/binder_alloc.c | 29
-rw-r--r--  drivers/android/binder_alloc.h | 4
-rw-r--r--  drivers/auxdisplay/img-ascii-lcd.c | 4
-rw-r--r--  drivers/base/regmap/Kconfig | 4
-rw-r--r--  drivers/base/regmap/Makefile | 1
-rw-r--r--  drivers/base/regmap/regmap-slimbus.c | 80
-rw-r--r--  drivers/char/lp.c | 69
-rw-r--r--  drivers/char/mem.c | 27
-rw-r--r--  drivers/char/xillybus/Kconfig | 4
-rw-r--r--  drivers/char/xillybus/xillybus_of.c | 12
-rw-r--r--  drivers/eisa/eisa-bus.c | 62
-rw-r--r--  drivers/eisa/pci_eisa.c | 10
-rw-r--r--  drivers/eisa/virtual_root.c | 19
-rw-r--r--  drivers/extcon/extcon-adc-jack.c | 2
-rw-r--r--  drivers/extcon/extcon-axp288.c | 36
-rw-r--r--  drivers/extcon/extcon-max77693.c | 2
-rw-r--r--  drivers/extcon/extcon-max8997.c | 2
-rw-r--r--  drivers/fpga/Kconfig | 103
-rw-r--r--  drivers/fpga/Makefile | 1
-rw-r--r--  drivers/fpga/fpga-bridge.c | 113
-rw-r--r--  drivers/fpga/fpga-mgr.c | 123
-rw-r--r--  drivers/fpga/fpga-region.c | 464
-rw-r--r--  drivers/fpga/of-fpga-region.c | 504
-rw-r--r--  drivers/fpga/socfpga-a10.c | 8
-rw-r--r--  drivers/fsi/Kconfig | 6
-rw-r--r--  drivers/hv/hv.c | 3
-rw-r--r--  drivers/hv/vmbus_drv.c | 40
-rw-r--r--  drivers/hwtracing/coresight/coresight-dynamic-replicator.c | 4
-rw-r--r--  drivers/hwtracing/coresight/coresight-etb10.c | 1
-rw-r--r--  drivers/hwtracing/coresight/coresight-funnel.c | 4
-rw-r--r--  drivers/hwtracing/coresight/coresight-tpiu.c | 17
-rw-r--r--  drivers/hwtracing/coresight/coresight.c | 25
-rw-r--r--  drivers/misc/Kconfig | 4
-rw-r--r--  drivers/misc/ad525x_dpot.c | 30
-rw-r--r--  drivers/misc/ad525x_dpot.h | 12
-rw-r--r--  drivers/misc/apds990x.c | 11
-rw-r--r--  drivers/misc/ds1682.c | 35
-rw-r--r--  drivers/misc/eeprom/at25.c | 3
-rw-r--r--  drivers/misc/enclosure.c | 12
-rw-r--r--  drivers/misc/fsa9480.c | 1
-rw-r--r--  drivers/misc/genwqe/card_base.c | 16
-rw-r--r--  drivers/misc/genwqe/card_base.h | 20
-rw-r--r--  drivers/misc/genwqe/card_ddcb.c | 22
-rw-r--r--  drivers/misc/genwqe/card_debugfs.c | 2
-rw-r--r--  drivers/misc/genwqe/card_dev.c | 19
-rw-r--r--  drivers/misc/genwqe/card_utils.c | 25
-rw-r--r--  drivers/misc/hpilo.c | 5
-rw-r--r--  drivers/misc/hpilo.h | 5
-rw-r--r--  drivers/misc/ics932s401.c | 8
-rw-r--r--  drivers/misc/isl29003.c | 7
-rw-r--r--  drivers/misc/lkdtm_core.c | 2
-rw-r--r--  drivers/misc/lkdtm_heap.c | 4
-rw-r--r--  drivers/misc/mei/bus.c | 10
-rw-r--r--  drivers/misc/mei/hw-me.c | 4
-rw-r--r--  drivers/misc/mei/hw-txe.c | 4
-rw-r--r--  drivers/misc/mei/init.c | 4
-rw-r--r--  drivers/misc/mei/pci-me.c | 5
-rw-r--r--  drivers/misc/mic/vop/vop_vringh.c | 22
-rw-r--r--  drivers/misc/vexpress-syscfg.c | 4
-rw-r--r--  drivers/mux/Kconfig | 1
-rw-r--r--  drivers/mux/Makefile | 1
-rw-r--r--  drivers/mux/adg792a.c | 5
-rw-r--r--  drivers/mux/core.c | 5
-rw-r--r--  drivers/mux/gpio.c | 5
-rw-r--r--  drivers/mux/mmio.c | 5
-rw-r--r--  drivers/nvmem/core.c | 17
-rw-r--r--  drivers/nvmem/rockchip-efuse.c | 70
-rw-r--r--  drivers/nvmem/uniphier-efuse.c | 10
-rw-r--r--  drivers/siox/Kconfig | 18
-rw-r--r--  drivers/siox/Makefile | 2
-rw-r--r--  drivers/siox/siox-bus-gpio.c | 172
-rw-r--r--  drivers/siox/siox-core.c | 934
-rw-r--r--  drivers/siox/siox.h | 49
-rw-r--r--  drivers/slimbus/Kconfig | 24
-rw-r--r--  drivers/slimbus/Makefile | 10
-rw-r--r--  drivers/slimbus/core.c | 480
-rw-r--r--  drivers/slimbus/messaging.c | 332
-rw-r--r--  drivers/slimbus/qcom-ctrl.c | 747
-rw-r--r--  drivers/slimbus/sched.c | 121
-rw-r--r--  drivers/slimbus/slimbus.h | 261
-rw-r--r--  drivers/soundwire/Kconfig | 37
-rw-r--r--  drivers/soundwire/Makefile | 18
-rw-r--r--  drivers/soundwire/bus.c | 997
-rw-r--r--  drivers/soundwire/bus.h | 71
-rw-r--r--  drivers/soundwire/bus_type.c | 193
-rw-r--r--  drivers/soundwire/cadence_master.c | 751
-rw-r--r--  drivers/soundwire/cadence_master.h | 48
-rw-r--r--  drivers/soundwire/intel.c | 345
-rw-r--r--  drivers/soundwire/intel.h | 23
-rw-r--r--  drivers/soundwire/intel_init.c | 198
-rw-r--r--  drivers/soundwire/mipi_disco.c | 401
-rw-r--r--  drivers/soundwire/slave.c | 114
-rw-r--r--  drivers/uio/uio_hv_generic.c | 138
-rw-r--r--  drivers/virt/Kconfig | 1
-rw-r--r--  drivers/virt/Makefile | 1
-rw-r--r--  drivers/virt/vboxguest/Kconfig | 18
-rw-r--r--  drivers/virt/vboxguest/Makefile | 3
-rw-r--r--  drivers/virt/vboxguest/vboxguest_core.c | 1571
-rw-r--r--  drivers/virt/vboxguest/vboxguest_core.h | 174
-rw-r--r--  drivers/virt/vboxguest/vboxguest_linux.c | 466
-rw-r--r--  drivers/virt/vboxguest/vboxguest_utils.c | 803
-rw-r--r--  drivers/virt/vboxguest/vmmdev.h | 449
-rw-r--r--  drivers/vme/vme.c | 2
107 files changed, 11461 insertions(+), 933 deletions(-)
diff --git a/drivers/Kconfig b/drivers/Kconfig
index ef5fb8395d76..879dc0604cba 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -153,6 +153,8 @@ source "drivers/remoteproc/Kconfig"
source "drivers/rpmsg/Kconfig"
+source "drivers/soundwire/Kconfig"
+
source "drivers/soc/Kconfig"
source "drivers/devfreq/Kconfig"
@@ -213,4 +215,8 @@ source "drivers/opp/Kconfig"
source "drivers/visorbus/Kconfig"
+source "drivers/siox/Kconfig"
+
+source "drivers/slimbus/Kconfig"
+
endmenu
diff --git a/drivers/Makefile b/drivers/Makefile
index 7a2330077e47..7a0438744053 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -87,6 +87,7 @@ obj-$(CONFIG_MTD) += mtd/
obj-$(CONFIG_SPI) += spi/
obj-$(CONFIG_SPMI) += spmi/
obj-$(CONFIG_HSI) += hsi/
+obj-$(CONFIG_SLIMBUS) += slimbus/
obj-y += net/
obj-$(CONFIG_ATM) += atm/
obj-$(CONFIG_FUSION) += message/
@@ -157,6 +158,7 @@ obj-$(CONFIG_MAILBOX) += mailbox/
obj-$(CONFIG_HWSPINLOCK) += hwspinlock/
obj-$(CONFIG_REMOTEPROC) += remoteproc/
obj-$(CONFIG_RPMSG) += rpmsg/
+obj-$(CONFIG_SOUNDWIRE) += soundwire/
# Virtualization drivers
obj-$(CONFIG_VIRT_DRIVERS) += virt/
@@ -185,3 +187,4 @@ obj-$(CONFIG_FSI) += fsi/
obj-$(CONFIG_TEE) += tee/
obj-$(CONFIG_MULTIPLEXER) += mux/
obj-$(CONFIG_UNISYS_VISORBUS) += visorbus/
+obj-$(CONFIG_SIOX) += siox/
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index cc89d0d2b965..d21040c5d343 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -141,7 +141,7 @@ enum {
};
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
-module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);
+module_param_named(debug_mask, binder_debug_mask, uint, 0644);
static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
module_param_named(devices, binder_devices_param, charp, 0444);
@@ -160,7 +160,7 @@ static int binder_set_stop_on_user_error(const char *val,
return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
- param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO);
+ param_get_int, &binder_stop_on_user_error, 0644);
#define binder_debug(mask, x...) \
do { \
@@ -249,7 +249,7 @@ static struct binder_transaction_log_entry *binder_transaction_log_add(
unsigned int cur = atomic_inc_return(&log->cur);
if (cur >= ARRAY_SIZE(log->entry))
- log->full = 1;
+ log->full = true;
e = &log->entry[cur % ARRAY_SIZE(log->entry)];
WRITE_ONCE(e->debug_id_done, 0);
/*
@@ -493,8 +493,6 @@ enum binder_deferred_state {
* (protected by @inner_lock)
* @todo: list of work for this process
* (protected by @inner_lock)
- * @wait: wait queue head to wait for proc work
- * (invariant after initialized)
* @stats: per-process binder statistics
* (atomics, no lock needed)
* @delivered_death: list of delivered death notification
@@ -537,7 +535,6 @@ struct binder_proc {
bool is_dead;
struct list_head todo;
- wait_queue_head_t wait;
struct binder_stats stats;
struct list_head delivered_death;
int max_threads;
@@ -579,6 +576,8 @@ enum {
* (protected by @proc->inner_lock)
* @todo: list of work to do for this thread
* (protected by @proc->inner_lock)
+ * @process_todo: whether work in @todo should be processed
+ * (protected by @proc->inner_lock)
* @return_error: transaction errors reported by this thread
* (only accessed by this thread)
* @reply_error: transaction errors reported by target thread
@@ -604,6 +603,7 @@ struct binder_thread {
bool looper_need_return; /* can be written by other thread */
struct binder_transaction *transaction_stack;
struct list_head todo;
+ bool process_todo;
struct binder_error return_error;
struct binder_error reply_error;
wait_queue_head_t wait;
@@ -789,6 +789,16 @@ static bool binder_worklist_empty(struct binder_proc *proc,
return ret;
}
+/**
+ * binder_enqueue_work_ilocked() - Add an item to the work list
+ * @work: struct binder_work to add to list
+ * @target_list: list to add work to
+ *
+ * Adds the work to the specified list. Asserts that work
+ * is not already on a list.
+ *
+ * Requires the proc->inner_lock to be held.
+ */
static void
binder_enqueue_work_ilocked(struct binder_work *work,
struct list_head *target_list)
@@ -799,22 +809,56 @@ binder_enqueue_work_ilocked(struct binder_work *work,
}
/**
- * binder_enqueue_work() - Add an item to the work list
- * @proc: binder_proc associated with list
+ * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
+ * @thread: thread to queue work to
* @work: struct binder_work to add to list
- * @target_list: list to add work to
*
- * Adds the work to the specified list. Asserts that work
- * is not already on a list.
+ * Adds the work to the todo list of the thread. Doesn't set the process_todo
+ * flag, which means that (if it wasn't already set) the thread will go to
+ * sleep without handling this work when it calls read.
+ *
+ * Requires the proc->inner_lock to be held.
*/
static void
-binder_enqueue_work(struct binder_proc *proc,
- struct binder_work *work,
- struct list_head *target_list)
+binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
+ struct binder_work *work)
{
- binder_inner_proc_lock(proc);
- binder_enqueue_work_ilocked(work, target_list);
- binder_inner_proc_unlock(proc);
+ binder_enqueue_work_ilocked(work, &thread->todo);
+}
+
+/**
+ * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
+ * @thread: thread to queue work to
+ * @work: struct binder_work to add to list
+ *
+ * Adds the work to the todo list of the thread, and enables processing
+ * of the todo queue.
+ *
+ * Requires the proc->inner_lock to be held.
+ */
+static void
+binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
+ struct binder_work *work)
+{
+ binder_enqueue_work_ilocked(work, &thread->todo);
+ thread->process_todo = true;
+}
+
+/**
+ * binder_enqueue_thread_work() - Add an item to the thread work list
+ * @thread: thread to queue work to
+ * @work: struct binder_work to add to list
+ *
+ * Adds the work to the todo list of the thread, and enables processing
+ * of the todo queue.
+ */
+static void
+binder_enqueue_thread_work(struct binder_thread *thread,
+ struct binder_work *work)
+{
+ binder_inner_proc_lock(thread->proc);
+ binder_enqueue_thread_work_ilocked(thread, work);
+ binder_inner_proc_unlock(thread->proc);
}
static void
@@ -940,7 +984,7 @@ err:
static bool binder_has_work_ilocked(struct binder_thread *thread,
bool do_proc_work)
{
- return !binder_worklist_empty_ilocked(&thread->todo) ||
+ return thread->process_todo ||
thread->looper_need_return ||
(do_proc_work &&
!binder_worklist_empty_ilocked(&thread->proc->todo));
@@ -1228,6 +1272,17 @@ static int binder_inc_node_nilocked(struct binder_node *node, int strong,
node->local_strong_refs++;
if (!node->has_strong_ref && target_list) {
binder_dequeue_work_ilocked(&node->work);
+ /*
+ * Note: this function is the only place where we queue
+ * directly to a thread->todo without using the
+ * corresponding binder_enqueue_thread_work() helper
+ * functions; in this case it's ok to not set the
+ * process_todo flag, since we know this node work will
+ * always be followed by other work that starts queue
+ * processing: in case of synchronous transactions, a
+ * BR_REPLY or BR_ERROR; in case of oneway
+ * transactions, a BR_TRANSACTION_COMPLETE.
+ */
binder_enqueue_work_ilocked(&node->work, target_list);
}
} else {
@@ -1239,6 +1294,9 @@ static int binder_inc_node_nilocked(struct binder_node *node, int strong,
node->debug_id);
return -EINVAL;
}
+ /*
+ * See comment above
+ */
binder_enqueue_work_ilocked(&node->work, target_list);
}
}
@@ -1928,9 +1986,9 @@ static void binder_send_failed_reply(struct binder_transaction *t,
binder_pop_transaction_ilocked(target_thread, t);
if (target_thread->reply_error.cmd == BR_OK) {
target_thread->reply_error.cmd = error_code;
- binder_enqueue_work_ilocked(
- &target_thread->reply_error.work,
- &target_thread->todo);
+ binder_enqueue_thread_work_ilocked(
+ target_thread,
+ &target_thread->reply_error.work);
wake_up_interruptible(&target_thread->wait);
} else {
WARN(1, "Unexpected reply error: %u\n",
@@ -2569,20 +2627,18 @@ static bool binder_proc_transaction(struct binder_transaction *t,
struct binder_proc *proc,
struct binder_thread *thread)
{
- struct list_head *target_list = NULL;
struct binder_node *node = t->buffer->target_node;
bool oneway = !!(t->flags & TF_ONE_WAY);
- bool wakeup = true;
+ bool pending_async = false;
BUG_ON(!node);
binder_node_lock(node);
if (oneway) {
BUG_ON(thread);
if (node->has_async_transaction) {
- target_list = &node->async_todo;
- wakeup = false;
+ pending_async = true;
} else {
- node->has_async_transaction = 1;
+ node->has_async_transaction = true;
}
}
@@ -2594,19 +2650,17 @@ static bool binder_proc_transaction(struct binder_transaction *t,
return false;
}
- if (!thread && !target_list)
+ if (!thread && !pending_async)
thread = binder_select_thread_ilocked(proc);
if (thread)
- target_list = &thread->todo;
- else if (!target_list)
- target_list = &proc->todo;
+ binder_enqueue_thread_work_ilocked(thread, &t->work);
+ else if (!pending_async)
+ binder_enqueue_work_ilocked(&t->work, &proc->todo);
else
- BUG_ON(target_list != &node->async_todo);
+ binder_enqueue_work_ilocked(&t->work, &node->async_todo);
- binder_enqueue_work_ilocked(&t->work, target_list);
-
- if (wakeup)
+ if (!pending_async)
binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
binder_inner_proc_unlock(proc);
@@ -3101,10 +3155,10 @@ static void binder_transaction(struct binder_proc *proc,
}
}
tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
- binder_enqueue_work(proc, tcomplete, &thread->todo);
t->work.type = BINDER_WORK_TRANSACTION;
if (reply) {
+ binder_enqueue_thread_work(thread, tcomplete);
binder_inner_proc_lock(target_proc);
if (target_thread->is_dead) {
binder_inner_proc_unlock(target_proc);
@@ -3112,13 +3166,21 @@ static void binder_transaction(struct binder_proc *proc,
}
BUG_ON(t->buffer->async_transaction != 0);
binder_pop_transaction_ilocked(target_thread, in_reply_to);
- binder_enqueue_work_ilocked(&t->work, &target_thread->todo);
+ binder_enqueue_thread_work_ilocked(target_thread, &t->work);
binder_inner_proc_unlock(target_proc);
wake_up_interruptible_sync(&target_thread->wait);
binder_free_transaction(in_reply_to);
} else if (!(t->flags & TF_ONE_WAY)) {
BUG_ON(t->buffer->async_transaction != 0);
binder_inner_proc_lock(proc);
+ /*
+ * Defer the TRANSACTION_COMPLETE, so we don't return to
+ * userspace immediately; this allows the target process to
+ * immediately start processing this transaction, reducing
+ * latency. We will then return the TRANSACTION_COMPLETE when
+ * the target replies (or there is an error).
+ */
+ binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
t->need_reply = 1;
t->from_parent = thread->transaction_stack;
thread->transaction_stack = t;
@@ -3132,6 +3194,7 @@ static void binder_transaction(struct binder_proc *proc,
} else {
BUG_ON(target_node == NULL);
BUG_ON(t->buffer->async_transaction != 1);
+ binder_enqueue_thread_work(thread, tcomplete);
if (!binder_proc_transaction(t, target_proc, NULL))
goto err_dead_proc_or_thread;
}
@@ -3210,15 +3273,11 @@ err_invalid_target_handle:
BUG_ON(thread->return_error.cmd != BR_OK);
if (in_reply_to) {
thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
- binder_enqueue_work(thread->proc,
- &thread->return_error.work,
- &thread->todo);
+ binder_enqueue_thread_work(thread, &thread->return_error.work);
binder_send_failed_reply(in_reply_to, return_error);
} else {
thread->return_error.cmd = return_error;
- binder_enqueue_work(thread->proc,
- &thread->return_error.work,
- &thread->todo);
+ binder_enqueue_thread_work(thread, &thread->return_error.work);
}
}
@@ -3424,7 +3483,7 @@ static int binder_thread_write(struct binder_proc *proc,
w = binder_dequeue_work_head_ilocked(
&buf_node->async_todo);
if (!w) {
- buf_node->has_async_transaction = 0;
+ buf_node->has_async_transaction = false;
} else {
binder_enqueue_work_ilocked(
w, &proc->todo);
@@ -3522,10 +3581,9 @@ static int binder_thread_write(struct binder_proc *proc,
WARN_ON(thread->return_error.cmd !=
BR_OK);
thread->return_error.cmd = BR_ERROR;
- binder_enqueue_work(
- thread->proc,
- &thread->return_error.work,
- &thread->todo);
+ binder_enqueue_thread_work(
+ thread,
+ &thread->return_error.work);
binder_debug(
BINDER_DEBUG_FAILED_TRANSACTION,
"%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
@@ -3605,9 +3663,9 @@ static int binder_thread_write(struct binder_proc *proc,
if (thread->looper &
(BINDER_LOOPER_STATE_REGISTERED |
BINDER_LOOPER_STATE_ENTERED))
- binder_enqueue_work_ilocked(
- &death->work,
- &thread->todo);
+ binder_enqueue_thread_work_ilocked(
+ thread,
+ &death->work);
else {
binder_enqueue_work_ilocked(
&death->work,
@@ -3662,8 +3720,8 @@ static int binder_thread_write(struct binder_proc *proc,
if (thread->looper &
(BINDER_LOOPER_STATE_REGISTERED |
BINDER_LOOPER_STATE_ENTERED))
- binder_enqueue_work_ilocked(
- &death->work, &thread->todo);
+ binder_enqueue_thread_work_ilocked(
+ thread, &death->work);
else {
binder_enqueue_work_ilocked(
&death->work,
@@ -3837,6 +3895,8 @@ retry:
break;
}
w = binder_dequeue_work_head_ilocked(list);
+ if (binder_worklist_empty_ilocked(&thread->todo))
+ thread->process_todo = false;
switch (w->type) {
case BINDER_WORK_TRANSACTION: {
@@ -4302,6 +4362,18 @@ static int binder_thread_release(struct binder_proc *proc,
if (t)
spin_lock(&t->lock);
}
+
+ /*
+ * If this thread used poll, make sure we remove the waitqueue
+ * from any epoll data structures holding it with POLLFREE.
+ * waitqueue_active() is safe to use here because we're holding
+ * the inner lock.
+ */
+ if ((thread->looper & BINDER_LOOPER_STATE_POLL) &&
+ waitqueue_active(&thread->wait)) {
+ wake_up_poll(&thread->wait, POLLHUP | POLLFREE);
+ }
+
binder_inner_proc_unlock(thread->proc);
if (send_reply)
@@ -4646,7 +4718,7 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
return 0;
err_bad_arg:
- pr_err("binder_mmap: %d %lx-%lx %s failed %d\n",
+ pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
return ret;
}
@@ -4656,7 +4728,7 @@ static int binder_open(struct inode *nodp, struct file *filp)
struct binder_proc *proc;
struct binder_device *binder_dev;
- binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n",
+ binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
current->group_leader->pid, current->pid);
proc = kzalloc(sizeof(*proc), GFP_KERNEL);
@@ -4695,7 +4767,7 @@ static int binder_open(struct inode *nodp, struct file *filp)
* anyway print all contexts that a given PID has, so this
* is not a problem.
*/
- proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO,
+ proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
binder_debugfs_dir_entry_proc,
(void *)(unsigned long)proc->pid,
&binder_proc_fops);
@@ -5524,7 +5596,9 @@ static int __init binder_init(void)
struct binder_device *device;
struct hlist_node *tmp;
- binder_alloc_shrinker_init();
+ ret = binder_alloc_shrinker_init();
+ if (ret)
+ return ret;
atomic_set(&binder_transaction_log.cur, ~0U);
atomic_set(&binder_transaction_log_failed.cur, ~0U);
@@ -5536,27 +5610,27 @@ static int __init binder_init(void)
if (binder_debugfs_dir_entry_root) {
debugfs_create_file("state",
- S_IRUGO,
+ 0444,
binder_debugfs_dir_entry_root,
NULL,
&binder_state_fops);
debugfs_create_file("stats",
- S_IRUGO,
+ 0444,
binder_debugfs_dir_entry_root,
NULL,
&binder_stats_fops);
debugfs_create_file("transactions",
- S_IRUGO,
+ 0444,
binder_debugfs_dir_entry_root,
NULL,
&binder_transactions_fops);
debugfs_create_file("transaction_log",
- S_IRUGO,
+ 0444,
binder_debugfs_dir_entry_root,
&binder_transaction_log,
&binder_transaction_log_fops);
debugfs_create_file("failed_transaction_log",
- S_IRUGO,
+ 0444,
binder_debugfs_dir_entry_root,
&binder_transaction_log_failed,
&binder_transaction_log_fops);
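
The key to the binder changes above is the new process_todo flag: work can sit on thread->todo without making the thread runnable, until a later enqueue flips the flag. Below is a standalone C model of that gating (hypothetical names, not kernel code), showing why a deferred TRANSACTION_COMPLETE alone does not wake the sender:

#include <stdbool.h>
#include <stdio.h>

/* hypothetical model of a binder-like per-thread work queue */
struct thread_model {
        int pending;            /* items sitting on the todo list */
        bool process_todo;      /* may the thread consume them yet? */
};

/* deferred enqueue: queue the item but leave process_todo alone */
static void enqueue_deferred(struct thread_model *t)
{
        t->pending++;
}

/* normal enqueue: queue the item and enable processing */
static void enqueue(struct thread_model *t)
{
        t->pending++;
        t->process_todo = true;
}

/* mirrors binder_has_work_ilocked(): only process_todo counts */
static bool has_work(const struct thread_model *t)
{
        return t->process_todo;
}

int main(void)
{
        struct thread_model sender = { 0, false };

        enqueue_deferred(&sender);      /* deferred TRANSACTION_COMPLETE */
        printf("after deferred enqueue: has_work=%d\n", has_work(&sender));

        enqueue(&sender);               /* BR_REPLY arrives */
        printf("after reply enqueue:    has_work=%d\n", has_work(&sender));
        return 0;
}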
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 6f6f745605af..5a426c877dfb 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -281,6 +281,9 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
goto err_vm_insert_page_failed;
}
+ if (index + 1 > alloc->pages_high)
+ alloc->pages_high = index + 1;
+
trace_binder_alloc_page_end(alloc, index);
/* vm_insert_page does not seem to increment the refcount */
}
@@ -324,11 +327,12 @@ err_no_vma:
return vma ? -ENOMEM : -ESRCH;
}
-struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc,
- size_t data_size,
- size_t offsets_size,
- size_t extra_buffers_size,
- int is_async)
+static struct binder_buffer *binder_alloc_new_buf_locked(
+ struct binder_alloc *alloc,
+ size_t data_size,
+ size_t offsets_size,
+ size_t extra_buffers_size,
+ int is_async)
{
struct rb_node *n = alloc->free_buffers.rb_node;
struct binder_buffer *buffer;
@@ -666,7 +670,7 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
goto err_already_mapped;
}
- area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
+ area = get_vm_area(vma->vm_end - vma->vm_start, VM_ALLOC);
if (area == NULL) {
ret = -ENOMEM;
failure_string = "get_vm_area";
@@ -853,6 +857,7 @@ void binder_alloc_print_pages(struct seq_file *m,
}
mutex_unlock(&alloc->mutex);
seq_printf(m, " pages: %d:%d:%d\n", active, lru, free);
+ seq_printf(m, " pages high watermark: %zu\n", alloc->pages_high);
}
/**
@@ -1002,8 +1007,14 @@ void binder_alloc_init(struct binder_alloc *alloc)
INIT_LIST_HEAD(&alloc->buffers);
}
-void binder_alloc_shrinker_init(void)
+int binder_alloc_shrinker_init(void)
{
- list_lru_init(&binder_alloc_lru);
- register_shrinker(&binder_shrinker);
+ int ret = list_lru_init(&binder_alloc_lru);
+
+ if (ret == 0) {
+ ret = register_shrinker(&binder_shrinker);
+ if (ret)
+ list_lru_destroy(&binder_alloc_lru);
+ }
+ return ret;
}
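
The binder_alloc_shrinker_init() change above is a standard two-step-init unwind: if the second initializer fails, the first is torn down before the error propagates. A minimal standalone C sketch of the same shape, with stubs standing in for list_lru_init()/register_shrinker():

#include <stdio.h>

static int  init_lru(void)           { return 0; }
static int  register_shrinker_(void) { return -1; /* simulate failure */ }
static void destroy_lru(void)        { puts("lru torn down"); }

static int shrinker_init(void)
{
        int ret = init_lru();

        if (ret == 0) {
                ret = register_shrinker_();
                if (ret)
                        destroy_lru();  /* undo step 1 on step-2 failure */
        }
        return ret;
}

int main(void)
{
        printf("shrinker_init() = %d\n", shrinker_init());
        return 0;
}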
diff --git a/drivers/android/binder_alloc.h b/drivers/android/binder_alloc.h
index 2dd33b6df104..9ef64e563856 100644
--- a/drivers/android/binder_alloc.h
+++ b/drivers/android/binder_alloc.h
@@ -92,6 +92,7 @@ struct binder_lru_page {
* @pages: array of binder_lru_page
* @buffer_size: size of address space specified via mmap
* @pid: pid for associated binder_proc (invariant after init)
+ * @pages_high: high watermark of offset in @pages
*
* Bookkeeping structure for per-proc address space management for binder
* buffers. It is normally initialized during binder_init() and binder_mmap()
@@ -112,6 +113,7 @@ struct binder_alloc {
size_t buffer_size;
uint32_t buffer_free;
int pid;
+ size_t pages_high;
};
#ifdef CONFIG_ANDROID_BINDER_IPC_SELFTEST
@@ -128,7 +130,7 @@ extern struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
size_t extra_buffers_size,
int is_async);
extern void binder_alloc_init(struct binder_alloc *alloc);
-void binder_alloc_shrinker_init(void);
+extern int binder_alloc_shrinker_init(void);
extern void binder_alloc_vma_close(struct binder_alloc *alloc);
extern struct binder_buffer *
binder_alloc_prepare_to_free(struct binder_alloc *alloc,
diff --git a/drivers/auxdisplay/img-ascii-lcd.c b/drivers/auxdisplay/img-ascii-lcd.c
index db040b378224..9180b9bd5821 100644
--- a/drivers/auxdisplay/img-ascii-lcd.c
+++ b/drivers/auxdisplay/img-ascii-lcd.c
@@ -441,3 +441,7 @@ static struct platform_driver img_ascii_lcd_driver = {
.remove = img_ascii_lcd_remove,
};
module_platform_driver(img_ascii_lcd_driver);
+
+MODULE_DESCRIPTION("Imagination Technologies ASCII LCD Display");
+MODULE_AUTHOR("Paul Burton <paul.burton@mips.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/base/regmap/Kconfig b/drivers/base/regmap/Kconfig
index 067073e4beb1..aff34c0c2a3e 100644
--- a/drivers/base/regmap/Kconfig
+++ b/drivers/base/regmap/Kconfig
@@ -20,6 +20,10 @@ config REGMAP_I2C
tristate
depends on I2C
+config REGMAP_SLIMBUS
+ tristate
+ depends on SLIMBUS
+
config REGMAP_SPI
tristate
depends on SPI
diff --git a/drivers/base/regmap/Makefile b/drivers/base/regmap/Makefile
index 22d263cca395..5ed0023fabda 100644
--- a/drivers/base/regmap/Makefile
+++ b/drivers/base/regmap/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_REGCACHE_COMPRESSED) += regcache-lzo.o
obj-$(CONFIG_DEBUG_FS) += regmap-debugfs.o
obj-$(CONFIG_REGMAP_AC97) += regmap-ac97.o
obj-$(CONFIG_REGMAP_I2C) += regmap-i2c.o
+obj-$(CONFIG_REGMAP_SLIMBUS) += regmap-slimbus.o
obj-$(CONFIG_REGMAP_SPI) += regmap-spi.o
obj-$(CONFIG_REGMAP_SPMI) += regmap-spmi.o
obj-$(CONFIG_REGMAP_MMIO) += regmap-mmio.o
diff --git a/drivers/base/regmap/regmap-slimbus.c b/drivers/base/regmap/regmap-slimbus.c
new file mode 100644
index 000000000000..c90bee81d954
--- /dev/null
+++ b/drivers/base/regmap/regmap-slimbus.c
@@ -0,0 +1,80 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2017, Linaro Ltd.
+
+#include <linux/regmap.h>
+#include <linux/slimbus.h>
+#include <linux/module.h>
+
+#include "internal.h"
+
+static int regmap_slimbus_byte_reg_read(void *context, unsigned int reg,
+ unsigned int *val)
+{
+ struct slim_device *sdev = context;
+ int v;
+
+ v = slim_readb(sdev, reg);
+
+ if (v < 0)
+ return v;
+
+ *val = v;
+
+ return 0;
+}
+
+static int regmap_slimbus_byte_reg_write(void *context, unsigned int reg,
+ unsigned int val)
+{
+ struct slim_device *sdev = context;
+
+ return slim_writeb(sdev, reg, val);
+}
+
+static struct regmap_bus regmap_slimbus_bus = {
+ .reg_write = regmap_slimbus_byte_reg_write,
+ .reg_read = regmap_slimbus_byte_reg_read,
+ .reg_format_endian_default = REGMAP_ENDIAN_LITTLE,
+ .val_format_endian_default = REGMAP_ENDIAN_LITTLE,
+};
+
+static const struct regmap_bus *regmap_get_slimbus(struct slim_device *slim,
+ const struct regmap_config *config)
+{
+ if (config->val_bits == 8 && config->reg_bits == 8)
+ return &regmap_slimbus_bus;
+
+ return ERR_PTR(-ENOTSUPP);
+}
+
+struct regmap *__regmap_init_slimbus(struct slim_device *slimbus,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name)
+{
+ const struct regmap_bus *bus = regmap_get_slimbus(slimbus, config);
+
+ if (IS_ERR(bus))
+ return ERR_CAST(bus);
+
+ return __regmap_init(&slimbus->dev, bus, &slimbus->dev, config,
+ lock_key, lock_name);
+}
+EXPORT_SYMBOL_GPL(__regmap_init_slimbus);
+
+struct regmap *__devm_regmap_init_slimbus(struct slim_device *slimbus,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name)
+{
+ const struct regmap_bus *bus = regmap_get_slimbus(slimbus, config);
+
+ if (IS_ERR(bus))
+ return ERR_CAST(bus);
+
+ return __devm_regmap_init(&slimbus->dev, bus, &slimbus, config,
+ lock_key, lock_name);
+}
+EXPORT_SYMBOL_GPL(__devm_regmap_init_slimbus);
+
+MODULE_LICENSE("GPL v2");
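
Client drivers are expected to consume this through the devm_regmap_init_slimbus() wrapper added by the same series. A hedged, kernel-style sketch of a hypothetical SLIMbus codec probe; the register addresses and names here are illustrative assumptions, not from the patch:

static const struct regmap_config example_regmap_cfg = {
        .reg_bits = 8,          /* this bus only accepts 8/8 layouts */
        .val_bits = 8,
        .max_register = 0xff,
};

static int example_codec_probe(struct slim_device *sdev)
{
        struct regmap *regmap;
        unsigned int val;
        int ret;

        regmap = devm_regmap_init_slimbus(sdev, &example_regmap_cfg);
        if (IS_ERR(regmap))
                return PTR_ERR(regmap);

        ret = regmap_read(regmap, 0x00, &val);  /* e.g. a chip-id register */
        if (ret)
                return ret;

        dev_info(&sdev->dev, "chip id: %#x\n", val);
        return 0;
}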
diff --git a/drivers/char/lp.c b/drivers/char/lp.c
index 8249762192d5..8c4dd1a3bb6a 100644
--- a/drivers/char/lp.c
+++ b/drivers/char/lp.c
@@ -659,17 +659,31 @@ static int lp_do_ioctl(unsigned int minor, unsigned int cmd,
return retval;
}
-static int lp_set_timeout(unsigned int minor, struct timeval *par_timeout)
+static int lp_set_timeout(unsigned int minor, s64 tv_sec, long tv_usec)
{
long to_jiffies;
/* Convert to jiffies, place in lp_table */
- if ((par_timeout->tv_sec < 0) ||
- (par_timeout->tv_usec < 0)) {
+ if (tv_sec < 0 || tv_usec < 0)
return -EINVAL;
+
+ /*
+ * we used to not check, so let's not make this fatal,
+ * but deal with user space passing a 32-bit tv_nsec in
+ * a 64-bit field, capping the timeout to 1 second
+ * worth of microseconds, and capping the total at
+ * MAX_JIFFY_OFFSET.
+ */
+ if (tv_usec > 999999)
+ tv_usec = 999999;
+
+ if (tv_sec >= MAX_SEC_IN_JIFFIES - 1) {
+ to_jiffies = MAX_JIFFY_OFFSET;
+ } else {
+ to_jiffies = DIV_ROUND_UP(tv_usec, 1000000/HZ);
+ to_jiffies += tv_sec * (long) HZ;
}
- to_jiffies = DIV_ROUND_UP(par_timeout->tv_usec, 1000000/HZ);
- to_jiffies += par_timeout->tv_sec * (long) HZ;
+
if (to_jiffies <= 0) {
return -EINVAL;
}
@@ -677,23 +691,43 @@ static int lp_set_timeout(unsigned int minor, struct timeval *par_timeout)
return 0;
}
+static int lp_set_timeout32(unsigned int minor, void __user *arg)
+{
+ s32 karg[2];
+
+ if (copy_from_user(karg, arg, sizeof(karg)))
+ return -EFAULT;
+
+ return lp_set_timeout(minor, karg[0], karg[1]);
+}
+
+static int lp_set_timeout64(unsigned int minor, void __user *arg)
+{
+ s64 karg[2];
+
+ if (copy_from_user(karg, arg, sizeof(karg)))
+ return -EFAULT;
+
+ return lp_set_timeout(minor, karg[0], karg[1]);
+}
+
static long lp_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
unsigned int minor;
- struct timeval par_timeout;
int ret;
minor = iminor(file_inode(file));
mutex_lock(&lp_mutex);
switch (cmd) {
- case LPSETTIMEOUT:
- if (copy_from_user(&par_timeout, (void __user *)arg,
- sizeof (struct timeval))) {
- ret = -EFAULT;
+ case LPSETTIMEOUT_OLD:
+ if (BITS_PER_LONG == 32) {
+ ret = lp_set_timeout32(minor, (void __user *)arg);
break;
}
- ret = lp_set_timeout(minor, &par_timeout);
+ /* fallthrough for 64-bit */
+ case LPSETTIMEOUT_NEW:
+ ret = lp_set_timeout64(minor, (void __user *)arg);
break;
default:
ret = lp_do_ioctl(minor, cmd, arg, (void __user *)arg);
@@ -709,18 +743,19 @@ static long lp_compat_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
unsigned int minor;
- struct timeval par_timeout;
int ret;
minor = iminor(file_inode(file));
mutex_lock(&lp_mutex);
switch (cmd) {
- case LPSETTIMEOUT:
- if (compat_get_timeval(&par_timeout, compat_ptr(arg))) {
- ret = -EFAULT;
+ case LPSETTIMEOUT_OLD:
+ if (!COMPAT_USE_64BIT_TIME) {
+ ret = lp_set_timeout32(minor, (void __user *)arg);
break;
}
- ret = lp_set_timeout(minor, &par_timeout);
+ /* fallthrough for x32 mode */
+ case LPSETTIMEOUT_NEW:
+ ret = lp_set_timeout64(minor, (void __user *)arg);
break;
#ifdef LP_STATS
case LPGETSTATS:
@@ -865,7 +900,7 @@ static int __init lp_setup (char *str)
printk(KERN_INFO "lp: too many ports, %s ignored.\n",
str);
} else if (!strcmp(str, "reset")) {
- reset = 1;
+ reset = true;
}
return 1;
}
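
The interesting part of the new lp_set_timeout() is the conversion: microseconds are rounded up to whole scheduler ticks, the seconds are added on top, and both inputs are clamped first. A standalone C model of that arithmetic (HZ and the clamp constants are stand-ins for the kernel's values):

#include <stdio.h>

#define HZ                 250                        /* hypothetical tick rate */
#define MAX_JIFFY_OFFSET   ((~0UL >> 1) - 1)
#define MAX_SEC            (MAX_JIFFY_OFFSET / HZ)    /* stand-in for MAX_SEC_IN_JIFFIES */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static long timeout_to_jiffies(long long tv_sec, long tv_usec)
{
        if (tv_sec < 0 || tv_usec < 0)
                return -1;                      /* -EINVAL in the driver */

        if (tv_usec > 999999)                   /* cap at 1s worth of usecs */
                tv_usec = 999999;

        if (tv_sec >= MAX_SEC - 1)
                return MAX_JIFFY_OFFSET;

        return DIV_ROUND_UP(tv_usec, 1000000 / HZ) + tv_sec * (long)HZ;
}

int main(void)
{
        /* 1.5 s plus one extra microsecond rounds up to 376 ticks at HZ=250 */
        printf("%ld\n", timeout_to_jiffies(1, 500001));
        return 0;
}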
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 6aefe5370e5b..052011bcf100 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -107,6 +107,8 @@ static ssize_t read_mem(struct file *file, char __user *buf,
phys_addr_t p = *ppos;
ssize_t read, sz;
void *ptr;
+ char *bounce;
+ int err;
if (p != *ppos)
return 0;
@@ -129,15 +131,22 @@ static ssize_t read_mem(struct file *file, char __user *buf,
}
#endif
+ bounce = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!bounce)
+ return -ENOMEM;
+
while (count > 0) {
unsigned long remaining;
int allowed;
sz = size_inside_page(p, count);
+ err = -EPERM;
allowed = page_is_allowed(p >> PAGE_SHIFT);
if (!allowed)
- return -EPERM;
+ goto failed;
+
+ err = -EFAULT;
if (allowed == 2) {
/* Show zeros for restricted memory. */
remaining = clear_user(buf, sz);
@@ -149,24 +158,32 @@ static ssize_t read_mem(struct file *file, char __user *buf,
*/
ptr = xlate_dev_mem_ptr(p);
if (!ptr)
- return -EFAULT;
-
- remaining = copy_to_user(buf, ptr, sz);
+ goto failed;
+ err = probe_kernel_read(bounce, ptr, sz);
unxlate_dev_mem_ptr(p, ptr);
+ if (err)
+ goto failed;
+
+ remaining = copy_to_user(buf, bounce, sz);
}
if (remaining)
- return -EFAULT;
+ goto failed;
buf += sz;
p += sz;
count -= sz;
read += sz;
}
+ kfree(bounce);
*ppos += read;
return read;
+
+failed:
+ kfree(bounce);
+ return err;
}
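
Two patterns carry the read_mem() change: a bounce page so a faulting source is caught by probe_kernel_read() before anything reaches userspace, and a single failed: label so the bounce buffer is freed on every exit path. A standalone C sketch of that cleanup shape, with stubs in place of the kernel copy helpers:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define BOUNCE_SZ 4096

/* stub standing in for probe_kernel_read(): may fail */
static int safe_read(char *dst, const char *src, size_t off, size_t n)
{
        if (!src)
                return -1;      /* simulated fault */
        memcpy(dst, src + off, n);
        return 0;
}

static long read_through_bounce(char *ubuf, const char *src, size_t count)
{
        long read = 0;
        int err;
        char *bounce = malloc(BOUNCE_SZ);

        if (!bounce)
                return -12;     /* -ENOMEM */

        while (count > 0) {
                size_t sz = count < BOUNCE_SZ ? count : BOUNCE_SZ;

                err = -14;      /* -EFAULT */
                if (safe_read(bounce, src, read, sz))
                        goto failed;
                memcpy(ubuf + read, bounce, sz);  /* copy_to_user stand-in */
                count -= sz;
                read += sz;
        }
        free(bounce);
        return read;

failed:
        free(bounce);           /* freed on every error path */
        return err;
}

int main(void)
{
        char dst[8];

        printf("%ld\n", read_through_bounce(dst, "payload", 7));
        printf("%ld\n", read_through_bounce(dst, NULL, 7));
        return 0;
}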
static ssize_t write_mem(struct file *file, const char __user *buf,
diff --git a/drivers/char/xillybus/Kconfig b/drivers/char/xillybus/Kconfig
index b302684d86c1..a1f16df08d32 100644
--- a/drivers/char/xillybus/Kconfig
+++ b/drivers/char/xillybus/Kconfig
@@ -4,7 +4,7 @@
config XILLYBUS
tristate "Xillybus generic FPGA interface"
- depends on PCI || (OF_ADDRESS && OF_IRQ)
+ depends on PCI || OF
select CRC32
help
Xillybus is a generic interface for peripherals designed on
@@ -24,7 +24,7 @@ config XILLYBUS_PCIE
config XILLYBUS_OF
tristate "Xillybus over Device Tree"
- depends on OF_ADDRESS && OF_IRQ && HAS_DMA
+ depends on OF && HAS_DMA
help
Set to M if you want Xillybus to find its resources from the
Open Firmware Flattened Device Tree. If the target is an embedded
diff --git a/drivers/char/xillybus/xillybus_of.c b/drivers/char/xillybus/xillybus_of.c
index 78a492f5acfb..4d6625ccb48f 100644
--- a/drivers/char/xillybus/xillybus_of.c
+++ b/drivers/char/xillybus/xillybus_of.c
@@ -15,10 +15,6 @@
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/of.h>
-#include <linux/of_irq.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
-#include <linux/of_platform.h>
#include <linux/err.h>
#include "xillybus.h"
@@ -123,7 +119,7 @@ static int xilly_drv_probe(struct platform_device *op)
struct xilly_endpoint *endpoint;
int rc;
int irq;
- struct resource res;
+ struct resource *res;
struct xilly_endpoint_hardware *ephw = &of_hw;
if (of_property_read_bool(dev->of_node, "dma-coherent"))
@@ -136,13 +132,13 @@ static int xilly_drv_probe(struct platform_device *op)
dev_set_drvdata(dev, endpoint);
- rc = of_address_to_resource(dev->of_node, 0, &res);
- endpoint->registers = devm_ioremap_resource(dev, &res);
+ res = platform_get_resource(op, IORESOURCE_MEM, 0);
+ endpoint->registers = devm_ioremap_resource(dev, res);
if (IS_ERR(endpoint->registers))
return PTR_ERR(endpoint->registers);
- irq = irq_of_parse_and_map(dev->of_node, 0);
+ irq = platform_get_irq(op, 0);
rc = devm_request_irq(dev, irq, xillybus_isr, 0, xillyname, endpoint);
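
The xillybus_of conversion swaps the OF-specific lookups for the generic platform helpers, which is why four of_* includes could be dropped. A hedged kernel-style sketch of the resulting probe shape (hypothetical driver names); note that platform_get_irq() returns a negative errno on failure, the same contract the extcon-adc-jack fix below depends on:

static irqreturn_t example_isr(int irq, void *data)
{
        return IRQ_HANDLED;
}

static int example_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct resource *res;
        void __iomem *regs;
        int irq;

        /* generic replacements for of_address_to_resource() +
         * irq_of_parse_and_map() */
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        regs = devm_ioremap_resource(dev, res);
        if (IS_ERR(regs))
                return PTR_ERR(regs);

        irq = platform_get_irq(pdev, 0);
        if (irq < 0)            /* negative errno, never 0-as-failure */
                return irq;

        return devm_request_irq(dev, irq, example_isr, 0,
                                dev_name(dev), NULL);
}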
diff --git a/drivers/eisa/eisa-bus.c b/drivers/eisa/eisa-bus.c
index 612afeaec3cb..1e8062f6dbfc 100644
--- a/drivers/eisa/eisa-bus.c
+++ b/drivers/eisa/eisa-bus.c
@@ -75,9 +75,9 @@ static void __init eisa_name_device(struct eisa_device *edev)
static char __init *decode_eisa_sig(unsigned long addr)
{
- static char sig_str[EISA_SIG_LEN];
+ static char sig_str[EISA_SIG_LEN];
u8 sig[4];
- u16 rev;
+ u16 rev;
int i;
for (i = 0; i < 4; i++) {
@@ -96,14 +96,14 @@ static char __init *decode_eisa_sig(unsigned long addr)
if (!i && (sig[0] & 0x80))
return NULL;
}
-
- sig_str[0] = ((sig[0] >> 2) & 0x1f) + ('A' - 1);
- sig_str[1] = (((sig[0] & 3) << 3) | (sig[1] >> 5)) + ('A' - 1);
- sig_str[2] = (sig[1] & 0x1f) + ('A' - 1);
- rev = (sig[2] << 8) | sig[3];
- sprintf(sig_str + 3, "%04X", rev);
-
- return sig_str;
+
+ sig_str[0] = ((sig[0] >> 2) & 0x1f) + ('A' - 1);
+ sig_str[1] = (((sig[0] & 3) << 3) | (sig[1] >> 5)) + ('A' - 1);
+ sig_str[2] = (sig[1] & 0x1f) + ('A' - 1);
+ rev = (sig[2] << 8) | sig[3];
+ sprintf(sig_str + 3, "%04X", rev);
+
+ return sig_str;
}
static int eisa_bus_match(struct device *dev, struct device_driver *drv)
@@ -198,7 +198,7 @@ static int __init eisa_init_device(struct eisa_root_device *root,
sig = decode_eisa_sig(sig_addr);
if (!sig)
return -1; /* No EISA device here */
-
+
memcpy(edev->id.sig, sig, EISA_SIG_LEN);
edev->slot = slot;
edev->state = inb(SLOT_ADDRESS(root, slot) + EISA_CONFIG_OFFSET)
@@ -222,7 +222,7 @@ static int __init eisa_init_device(struct eisa_root_device *root,
if (is_forced_dev(enable_dev, enable_dev_count, root, edev))
edev->state = EISA_CONFIG_ENABLED | EISA_CONFIG_FORCED;
-
+
if (is_forced_dev(disable_dev, disable_dev_count, root, edev))
edev->state = EISA_CONFIG_FORCED;
@@ -275,7 +275,7 @@ static int __init eisa_request_resources(struct eisa_root_device *root,
edev->res[i].start = edev->res[i].end = 0;
continue;
}
-
+
if (slot) {
edev->res[i].name = NULL;
edev->res[i].start = SLOT_ADDRESS(root, slot)
@@ -295,7 +295,7 @@ static int __init eisa_request_resources(struct eisa_root_device *root,
}
return 0;
-
+
failed:
while (--i >= 0)
release_resource(&edev->res[i]);
@@ -314,7 +314,7 @@ static void __init eisa_release_resources(struct eisa_device *edev)
static int __init eisa_probe(struct eisa_root_device *root)
{
- int i, c;
+ int i, c;
struct eisa_device *edev;
char *enabled_str;
@@ -322,16 +322,14 @@ static int __init eisa_probe(struct eisa_root_device *root)
/* First try to get hold of slot 0. If there is no device
* here, simply fail, unless root->force_probe is set. */
-
+
edev = kzalloc(sizeof(*edev), GFP_KERNEL);
- if (!edev) {
- dev_err(root->dev, "EISA: Couldn't allocate mainboard slot\n");
+ if (!edev)
return -ENOMEM;
- }
-
+
if (eisa_request_resources(root, edev, 0)) {
dev_warn(root->dev,
- "EISA: Cannot allocate resource for mainboard\n");
+ "EISA: Cannot allocate resource for mainboard\n");
kfree(edev);
if (!root->force_probe)
return -EBUSY;
@@ -350,14 +348,14 @@ static int __init eisa_probe(struct eisa_root_device *root)
if (eisa_register_device(edev)) {
dev_err(&edev->dev, "EISA: Failed to register %s\n",
- edev->id.sig);
+ edev->id.sig);
eisa_release_resources(edev);
kfree(edev);
}
-
+
force_probe:
-
- for (c = 0, i = 1; i <= root->slots; i++) {
+
+ for (c = 0, i = 1; i <= root->slots; i++) {
edev = kzalloc(sizeof(*edev), GFP_KERNEL);
if (!edev) {
dev_err(root->dev, "EISA: Out of memory for slot %d\n",
@@ -367,8 +365,8 @@ static int __init eisa_probe(struct eisa_root_device *root)
if (eisa_request_resources(root, edev, i)) {
dev_warn(root->dev,
- "Cannot allocate resource for EISA slot %d\n",
- i);
+ "Cannot allocate resource for EISA slot %d\n",
+ i);
kfree(edev);
continue;
}
@@ -395,11 +393,11 @@ static int __init eisa_probe(struct eisa_root_device *root)
if (eisa_register_device(edev)) {
dev_err(&edev->dev, "EISA: Failed to register %s\n",
- edev->id.sig);
+ edev->id.sig);
eisa_release_resources(edev);
kfree(edev);
}
- }
+ }
dev_info(root->dev, "EISA: Detected %d card%s\n", c, c == 1 ? "" : "s");
return 0;
@@ -422,7 +420,7 @@ int __init eisa_root_register(struct eisa_root_device *root)
* been already registered. This prevents the virtual root
* device from registering after the real one has, for
* example... */
-
+
root->eisa_root_res.name = eisa_root_res.name;
root->eisa_root_res.start = root->res->start;
root->eisa_root_res.end = root->res->end;
@@ -431,7 +429,7 @@ int __init eisa_root_register(struct eisa_root_device *root)
err = request_resource(&eisa_root_res, &root->eisa_root_res);
if (err)
return err;
-
+
root->bus_nr = eisa_bus_count++;
err = eisa_probe(root);
@@ -444,7 +442,7 @@ int __init eisa_root_register(struct eisa_root_device *root)
static int __init eisa_init(void)
{
int r;
-
+
r = bus_register(&eisa_bus_type);
if (r)
return r;
diff --git a/drivers/eisa/pci_eisa.c b/drivers/eisa/pci_eisa.c
index a333bf3517de..b5f367b44413 100644
--- a/drivers/eisa/pci_eisa.c
+++ b/drivers/eisa/pci_eisa.c
@@ -50,11 +50,11 @@ static int __init pci_eisa_init(struct pci_dev *pdev)
return -1;
}
- pci_eisa_root.dev = &pdev->dev;
- pci_eisa_root.res = bus_res;
- pci_eisa_root.bus_base_addr = bus_res->start;
- pci_eisa_root.slots = EISA_MAX_SLOTS;
- pci_eisa_root.dma_mask = pdev->dma_mask;
+ pci_eisa_root.dev = &pdev->dev;
+ pci_eisa_root.res = bus_res;
+ pci_eisa_root.bus_base_addr = bus_res->start;
+ pci_eisa_root.slots = EISA_MAX_SLOTS;
+ pci_eisa_root.dma_mask = pdev->dma_mask;
dev_set_drvdata(pci_eisa_root.dev, &pci_eisa_root);
if (eisa_root_register (&pci_eisa_root)) {
diff --git a/drivers/eisa/virtual_root.c b/drivers/eisa/virtual_root.c
index 535e4f9c83f4..f1221c1d6319 100644
--- a/drivers/eisa/virtual_root.c
+++ b/drivers/eisa/virtual_root.c
@@ -35,11 +35,11 @@ static struct platform_device eisa_root_dev = {
};
static struct eisa_root_device eisa_bus_root = {
- .dev = &eisa_root_dev.dev,
- .bus_base_addr = 0,
- .res = &ioport_resource,
- .slots = EISA_MAX_SLOTS,
- .dma_mask = 0xffffffff,
+ .dev = &eisa_root_dev.dev,
+ .bus_base_addr = 0,
+ .res = &ioport_resource,
+ .slots = EISA_MAX_SLOTS,
+ .dma_mask = 0xffffffff,
};
static void virtual_eisa_release (struct device *dev)
@@ -50,13 +50,12 @@ static void virtual_eisa_release (struct device *dev)
static int __init virtual_eisa_root_init (void)
{
int r;
-
- if ((r = platform_device_register (&eisa_root_dev))) {
- return r;
- }
+
+ if ((r = platform_device_register (&eisa_root_dev)))
+ return r;
eisa_bus_root.force_probe = force_probe;
-
+
dev_set_drvdata(&eisa_root_dev.dev, &eisa_bus_root);
if (eisa_root_register (&eisa_bus_root)) {
diff --git a/drivers/extcon/extcon-adc-jack.c b/drivers/extcon/extcon-adc-jack.c
index 3877d86c746a..18026354c332 100644
--- a/drivers/extcon/extcon-adc-jack.c
+++ b/drivers/extcon/extcon-adc-jack.c
@@ -144,7 +144,7 @@ static int adc_jack_probe(struct platform_device *pdev)
return err;
data->irq = platform_get_irq(pdev, 0);
- if (!data->irq) {
+ if (data->irq < 0) {
dev_err(&pdev->dev, "platform_get_irq failed\n");
return -ENODEV;
}
diff --git a/drivers/extcon/extcon-axp288.c b/drivers/extcon/extcon-axp288.c
index 1621f2f7f129..0a44d43802fe 100644
--- a/drivers/extcon/extcon-axp288.c
+++ b/drivers/extcon/extcon-axp288.c
@@ -1,6 +1,7 @@
/*
* extcon-axp288.c - X-Power AXP288 PMIC extcon cable detection driver
*
+ * Copyright (C) 2016-2017 Hans de Goede <hdegoede@redhat.com>
* Copyright (C) 2015 Intel Corporation
* Author: Ramakrishna Pallala <ramakrishna.pallala@intel.com>
*
@@ -97,9 +98,11 @@ struct axp288_extcon_info {
struct device *dev;
struct regmap *regmap;
struct regmap_irq_chip_data *regmap_irqc;
+ struct delayed_work det_work;
int irq[EXTCON_IRQ_END];
struct extcon_dev *edev;
unsigned int previous_cable;
+ bool first_detect_done;
};
/* Power up/down reason string array */
@@ -137,6 +140,25 @@ static void axp288_extcon_log_rsi(struct axp288_extcon_info *info)
regmap_write(info->regmap, AXP288_PS_BOOT_REASON_REG, clear_mask);
}
+static void axp288_chrg_detect_complete(struct axp288_extcon_info *info)
+{
+ /*
+ * We depend on other drivers to do things like mux the data lines,
+ * enable/disable vbus based on the id-pin, etc. Sometimes the BIOS has
+ * not set these things up correctly resulting in the initial charger
+ * cable type detection giving a wrong result and we end up not charging
+ * or charging at only 0.5A.
+ *
+ * So we schedule a second cable type detection after 2 seconds to
+ * give the other drivers time to load and do their thing.
+ */
+ if (!info->first_detect_done) {
+ queue_delayed_work(system_wq, &info->det_work,
+ msecs_to_jiffies(2000));
+ info->first_detect_done = true;
+ }
+}
+
static int axp288_handle_chrg_det_event(struct axp288_extcon_info *info)
{
int ret, stat, cfg, pwr_stat;
@@ -183,8 +205,8 @@ static int axp288_handle_chrg_det_event(struct axp288_extcon_info *info)
cable = EXTCON_CHG_USB_DCP;
break;
default:
- dev_warn(info->dev,
- "disconnect or unknown or ID event\n");
+ dev_warn(info->dev, "unknown (reserved) bc detect result\n");
+ cable = EXTCON_CHG_USB_SDP;
}
no_vbus:
@@ -201,6 +223,8 @@ no_vbus:
info->previous_cable = cable;
}
+ axp288_chrg_detect_complete(info);
+
return 0;
dev_det_ret:
@@ -222,8 +246,11 @@ static irqreturn_t axp288_extcon_isr(int irq, void *data)
return IRQ_HANDLED;
}
-static void axp288_extcon_enable(struct axp288_extcon_info *info)
+static void axp288_extcon_det_work(struct work_struct *work)
{
+ struct axp288_extcon_info *info =
+ container_of(work, struct axp288_extcon_info, det_work.work);
+
regmap_update_bits(info->regmap, AXP288_BC_GLOBAL_REG,
BC_GLOBAL_RUN, 0);
/* Enable the charger detection logic */
@@ -245,6 +272,7 @@ static int axp288_extcon_probe(struct platform_device *pdev)
info->regmap = axp20x->regmap;
info->regmap_irqc = axp20x->regmap_irqc;
info->previous_cable = EXTCON_NONE;
+ INIT_DELAYED_WORK(&info->det_work, axp288_extcon_det_work);
platform_set_drvdata(pdev, info);
@@ -290,7 +318,7 @@ static int axp288_extcon_probe(struct platform_device *pdev)
}
/* Start charger cable type detection */
- axp288_extcon_enable(info);
+ queue_delayed_work(system_wq, &info->det_work, 0);
return 0;
}
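
The axp288 rework turns detection into a delayed_work item so the whole pass can re-queue itself two seconds after the first run. A hedged kernel-style sketch of that re-arm pattern (names are hypothetical; the real worker also programs the BC_GLOBAL detection registers):

struct example_info {
        struct delayed_work det_work;
        bool first_detect_done;
};

static void example_det_work(struct work_struct *work)
{
        struct example_info *info =
                container_of(work, struct example_info, det_work.work);

        /* ... run the actual cable-type detection here ... */

        /* one deferred re-run, after other drivers had time to load */
        if (!info->first_detect_done) {
                info->first_detect_done = true;
                queue_delayed_work(system_wq, &info->det_work,
                                   msecs_to_jiffies(2000));
        }
}

/* at probe time:
 *   INIT_DELAYED_WORK(&info->det_work, example_det_work);
 *   queue_delayed_work(system_wq, &info->det_work, 0);
 */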
diff --git a/drivers/extcon/extcon-max77693.c b/drivers/extcon/extcon-max77693.c
index 643411066ad9..227651ff9666 100644
--- a/drivers/extcon/extcon-max77693.c
+++ b/drivers/extcon/extcon-max77693.c
@@ -266,7 +266,7 @@ static int max77693_muic_set_debounce_time(struct max77693_muic_info *info,
static int max77693_muic_set_path(struct max77693_muic_info *info,
u8 val, bool attached)
{
- int ret = 0;
+ int ret;
unsigned int ctrl1, ctrl2 = 0;
if (attached)
diff --git a/drivers/extcon/extcon-max8997.c b/drivers/extcon/extcon-max8997.c
index 8152790d72e1..9f30f4929b72 100644
--- a/drivers/extcon/extcon-max8997.c
+++ b/drivers/extcon/extcon-max8997.c
@@ -204,7 +204,7 @@ static int max8997_muic_set_debounce_time(struct max8997_muic_info *info,
static int max8997_muic_set_path(struct max8997_muic_info *info,
u8 val, bool attached)
{
- int ret = 0;
+ int ret;
u8 ctrl1, ctrl2 = 0;
if (attached)
diff --git a/drivers/fpga/Kconfig b/drivers/fpga/Kconfig
index ad5448f718b3..f47ef848bcd0 100644
--- a/drivers/fpga/Kconfig
+++ b/drivers/fpga/Kconfig
@@ -11,25 +11,30 @@ menuconfig FPGA
if FPGA
-config FPGA_REGION
- tristate "FPGA Region"
- depends on OF && FPGA_BRIDGE
+config FPGA_MGR_SOCFPGA
+ tristate "Altera SOCFPGA FPGA Manager"
+ depends on ARCH_SOCFPGA || COMPILE_TEST
help
- FPGA Regions allow loading FPGA images under control of
- the Device Tree.
+ FPGA manager driver support for Altera SOCFPGA.
-config FPGA_MGR_ICE40_SPI
- tristate "Lattice iCE40 SPI"
- depends on OF && SPI
+config FPGA_MGR_SOCFPGA_A10
+ tristate "Altera SoCFPGA Arria10"
+ depends on ARCH_SOCFPGA || COMPILE_TEST
+ select REGMAP_MMIO
help
- FPGA manager driver support for Lattice iCE40 FPGAs over SPI.
+ FPGA manager driver support for Altera Arria10 SoCFPGA.
-config FPGA_MGR_ALTERA_CVP
- tristate "Altera Arria-V/Cyclone-V/Stratix-V CvP FPGA Manager"
- depends on PCI
+config ALTERA_PR_IP_CORE
+ tristate "Altera Partial Reconfiguration IP Core"
+ help
+ Core driver support for Altera Partial Reconfiguration IP component
+
+config ALTERA_PR_IP_CORE_PLAT
+ tristate "Platform support of Altera Partial Reconfiguration IP Core"
+ depends on ALTERA_PR_IP_CORE && OF && HAS_IOMEM
help
- FPGA manager driver support for Arria-V, Cyclone-V, Stratix-V
- and Arria 10 Altera FPGAs using the CvP interface over PCIe.
+ Platform driver support for Altera Partial Reconfiguration IP
+ component
config FPGA_MGR_ALTERA_PS_SPI
tristate "Altera FPGA Passive Serial over SPI"
@@ -38,25 +43,19 @@ config FPGA_MGR_ALTERA_PS_SPI
FPGA manager driver support for Altera Arria/Cyclone/Stratix
using the passive serial interface over SPI.
-config FPGA_MGR_SOCFPGA
- tristate "Altera SOCFPGA FPGA Manager"
- depends on ARCH_SOCFPGA || COMPILE_TEST
- help
- FPGA manager driver support for Altera SOCFPGA.
-
-config FPGA_MGR_SOCFPGA_A10
- tristate "Altera SoCFPGA Arria10"
- depends on ARCH_SOCFPGA || COMPILE_TEST
- select REGMAP_MMIO
+config FPGA_MGR_ALTERA_CVP
+ tristate "Altera Arria-V/Cyclone-V/Stratix-V CvP FPGA Manager"
+ depends on PCI
help
- FPGA manager driver support for Altera Arria10 SoCFPGA.
+ FPGA manager driver support for Arria-V, Cyclone-V, Stratix-V
+ and Arria 10 Altera FPGAs using the CvP interface over PCIe.
-config FPGA_MGR_TS73XX
- tristate "Technologic Systems TS-73xx SBC FPGA Manager"
- depends on ARCH_EP93XX && MACH_TS72XX
+config FPGA_MGR_ZYNQ_FPGA
+ tristate "Xilinx Zynq FPGA"
+ depends on ARCH_ZYNQ || COMPILE_TEST
+ depends on HAS_DMA
help
- FPGA manager driver support for the Altera Cyclone II FPGA
- present on the TS-73xx SBC boards.
+ FPGA manager driver support for Xilinx Zynq FPGAs.
config FPGA_MGR_XILINX_SPI
tristate "Xilinx Configuration over Slave Serial (SPI)"
@@ -65,16 +64,21 @@ config FPGA_MGR_XILINX_SPI
FPGA manager driver support for Xilinx FPGA configuration
over slave serial interface.
-config FPGA_MGR_ZYNQ_FPGA
- tristate "Xilinx Zynq FPGA"
- depends on ARCH_ZYNQ || COMPILE_TEST
- depends on HAS_DMA
+config FPGA_MGR_ICE40_SPI
+ tristate "Lattice iCE40 SPI"
+ depends on OF && SPI
help
- FPGA manager driver support for Xilinx Zynq FPGAs.
+ FPGA manager driver support for Lattice iCE40 FPGAs over SPI.
+
+config FPGA_MGR_TS73XX
+ tristate "Technologic Systems TS-73xx SBC FPGA Manager"
+ depends on ARCH_EP93XX && MACH_TS72XX
+ help
+ FPGA manager driver support for the Altera Cyclone II FPGA
+ present on the TS-73xx SBC boards.
config FPGA_BRIDGE
tristate "FPGA Bridge Framework"
- depends on OF
help
Say Y here if you want to support bridges connected between host
processors and FPGAs or between FPGAs.
@@ -95,18 +99,6 @@ config ALTERA_FREEZE_BRIDGE
isolate one region of the FPGA from the busses while that
region is being reprogrammed.
-config ALTERA_PR_IP_CORE
- tristate "Altera Partial Reconfiguration IP Core"
- help
- Core driver support for Altera Partial Reconfiguration IP component
-
-config ALTERA_PR_IP_CORE_PLAT
- tristate "Platform support of Altera Partial Reconfiguration IP Core"
- depends on ALTERA_PR_IP_CORE && OF && HAS_IOMEM
- help
- Platform driver support for Altera Partial Reconfiguration IP
- component
-
config XILINX_PR_DECOUPLER
tristate "Xilinx LogiCORE PR Decoupler"
depends on FPGA_BRIDGE
@@ -117,4 +109,19 @@ config XILINX_PR_DECOUPLER
region of the FPGA from the busses while that region is
being reprogrammed during partial reconfig.
+config FPGA_REGION
+ tristate "FPGA Region"
+ depends on FPGA_BRIDGE
+ help
+ FPGA Region common code. A FPGA Region controls a FPGA Manager
+ and the FPGA Bridges associated with either a reconfigurable
+ region of an FPGA or a whole FPGA.
+
+config OF_FPGA_REGION
+ tristate "FPGA Region Device Tree Overlay Support"
+ depends on OF && FPGA_REGION
+ help
+ Support for loading FPGA images by applying a Device Tree
+ overlay.
+
endif # FPGA
diff --git a/drivers/fpga/Makefile b/drivers/fpga/Makefile
index f98dcf1d89e1..3cb276a0f88d 100644
--- a/drivers/fpga/Makefile
+++ b/drivers/fpga/Makefile
@@ -26,3 +26,4 @@ obj-$(CONFIG_XILINX_PR_DECOUPLER) += xilinx-pr-decoupler.o
# High Level Interfaces
obj-$(CONFIG_FPGA_REGION) += fpga-region.o
+obj-$(CONFIG_OF_FPGA_REGION) += of-fpga-region.o
diff --git a/drivers/fpga/fpga-bridge.c b/drivers/fpga/fpga-bridge.c
index 9651aa56244a..31bd2c59c305 100644
--- a/drivers/fpga/fpga-bridge.c
+++ b/drivers/fpga/fpga-bridge.c
@@ -2,6 +2,7 @@
* FPGA Bridge Framework Driver
*
* Copyright (C) 2013-2016 Altera Corporation, All Rights Reserved.
+ * Copyright (C) 2017 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -70,32 +71,13 @@ int fpga_bridge_disable(struct fpga_bridge *bridge)
}
EXPORT_SYMBOL_GPL(fpga_bridge_disable);
-/**
- * of_fpga_bridge_get - get an exclusive reference to a fpga bridge
- *
- * @np: node pointer of a FPGA bridge
- * @info: fpga image specific information
- *
- * Return fpga_bridge struct if successful.
- * Return -EBUSY if someone already has a reference to the bridge.
- * Return -ENODEV if @np is not a FPGA Bridge.
- */
-struct fpga_bridge *of_fpga_bridge_get(struct device_node *np,
- struct fpga_image_info *info)
-
+static struct fpga_bridge *__fpga_bridge_get(struct device *dev,
+ struct fpga_image_info *info)
{
- struct device *dev;
struct fpga_bridge *bridge;
int ret = -ENODEV;
- dev = class_find_device(fpga_bridge_class, NULL, np,
- fpga_bridge_of_node_match);
- if (!dev)
- goto err_dev;
-
bridge = to_fpga_bridge(dev);
- if (!bridge)
- goto err_dev;
bridge->info = info;
@@ -117,8 +99,58 @@ err_dev:
put_device(dev);
return ERR_PTR(ret);
}
+
+/**
+ * of_fpga_bridge_get - get an exclusive reference to a fpga bridge
+ *
+ * @np: node pointer of a FPGA bridge
+ * @info: fpga image specific information
+ *
+ * Return fpga_bridge struct if successful.
+ * Return -EBUSY if someone already has a reference to the bridge.
+ * Return -ENODEV if @np is not a FPGA Bridge.
+ */
+struct fpga_bridge *of_fpga_bridge_get(struct device_node *np,
+ struct fpga_image_info *info)
+{
+ struct device *dev;
+
+ dev = class_find_device(fpga_bridge_class, NULL, np,
+ fpga_bridge_of_node_match);
+ if (!dev)
+ return ERR_PTR(-ENODEV);
+
+ return __fpga_bridge_get(dev, info);
+}
EXPORT_SYMBOL_GPL(of_fpga_bridge_get);
+static int fpga_bridge_dev_match(struct device *dev, const void *data)
+{
+ return dev->parent == data;
+}
+
+/**
+ * fpga_bridge_get - get an exclusive reference to a fpga bridge
+ * @dev: parent device that fpga bridge was registered with
+ *
+ * Given a device, get an exclusive reference to a fpga bridge.
+ *
+ * Return: fpga bridge struct or IS_ERR() condition containing error code.
+ */
+struct fpga_bridge *fpga_bridge_get(struct device *dev,
+ struct fpga_image_info *info)
+{
+ struct device *bridge_dev;
+
+ bridge_dev = class_find_device(fpga_bridge_class, NULL, dev,
+ fpga_bridge_dev_match);
+ if (!bridge_dev)
+ return ERR_PTR(-ENODEV);
+
+ return __fpga_bridge_get(bridge_dev, info);
+}
+EXPORT_SYMBOL_GPL(fpga_bridge_get);
+
/**
* fpga_bridge_put - release a reference to a bridge
*
@@ -206,7 +238,7 @@ void fpga_bridges_put(struct list_head *bridge_list)
EXPORT_SYMBOL_GPL(fpga_bridges_put);
/**
- * fpga_bridges_get_to_list - get a bridge, add it to a list
+ * of_fpga_bridge_get_to_list - get a bridge, add it to a list
*
* @np: node pointer of a FPGA bridge
* @info: fpga image specific information
@@ -216,14 +248,44 @@ EXPORT_SYMBOL_GPL(fpga_bridges_put);
*
* Return 0 for success, error code from of_fpga_bridge_get() otherwise.
*/
-int fpga_bridge_get_to_list(struct device_node *np,
+int of_fpga_bridge_get_to_list(struct device_node *np,
+ struct fpga_image_info *info,
+ struct list_head *bridge_list)
+{
+ struct fpga_bridge *bridge;
+ unsigned long flags;
+
+ bridge = of_fpga_bridge_get(np, info);
+ if (IS_ERR(bridge))
+ return PTR_ERR(bridge);
+
+ spin_lock_irqsave(&bridge_list_lock, flags);
+ list_add(&bridge->node, bridge_list);
+ spin_unlock_irqrestore(&bridge_list_lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(of_fpga_bridge_get_to_list);
+
+/**
+ * fpga_bridge_get_to_list - given device, get a bridge, add it to a list
+ *
+ * @dev: FPGA bridge device
+ * @info: fpga image specific information
+ * @bridge_list: list of FPGA bridges
+ *
+ * Get an exclusive reference to the bridge and add it to the list.
+ *
+ * Return 0 for success, error code from fpga_bridge_get() otherwise.
+ */
+int fpga_bridge_get_to_list(struct device *dev,
struct fpga_image_info *info,
struct list_head *bridge_list)
{
struct fpga_bridge *bridge;
unsigned long flags;
- bridge = of_fpga_bridge_get(np, info);
+ bridge = fpga_bridge_get(dev, info);
if (IS_ERR(bridge))
return PTR_ERR(bridge);
@@ -303,6 +365,7 @@ int fpga_bridge_register(struct device *dev, const char *name,
bridge->priv = priv;
device_initialize(&bridge->dev);
+ bridge->dev.groups = br_ops->groups;
bridge->dev.class = fpga_bridge_class;
bridge->dev.parent = dev;
bridge->dev.of_node = dev->of_node;
@@ -381,7 +444,7 @@ static void __exit fpga_bridge_dev_exit(void)
}
MODULE_DESCRIPTION("FPGA Bridge Driver");
-MODULE_AUTHOR("Alan Tull <atull@opensource.altera.com>");
+MODULE_AUTHOR("Alan Tull <atull@kernel.org>");
MODULE_LICENSE("GPL v2");
subsys_initcall(fpga_bridge_dev_init);
diff --git a/drivers/fpga/fpga-mgr.c b/drivers/fpga/fpga-mgr.c
index 188ffefa3cc3..9939d2cbc9a6 100644
--- a/drivers/fpga/fpga-mgr.c
+++ b/drivers/fpga/fpga-mgr.c
@@ -2,6 +2,7 @@
* FPGA Manager Core
*
* Copyright (C) 2013-2015 Altera Corporation
+ * Copyright (C) 2017 Intel Corporation
*
* With code from the mailing list:
* Copyright (C) 2013 Xilinx, Inc.
@@ -31,6 +32,40 @@
static DEFINE_IDA(fpga_mgr_ida);
static struct class *fpga_mgr_class;
+struct fpga_image_info *fpga_image_info_alloc(struct device *dev)
+{
+ struct fpga_image_info *info;
+
+ get_device(dev);
+
+ info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
+ if (!info) {
+ put_device(dev);
+ return NULL;
+ }
+
+ info->dev = dev;
+
+ return info;
+}
+EXPORT_SYMBOL_GPL(fpga_image_info_alloc);
+
+void fpga_image_info_free(struct fpga_image_info *info)
+{
+ struct device *dev;
+
+ if (!info)
+ return;
+
+ dev = info->dev;
+ if (info->firmware_name)
+ devm_kfree(dev, info->firmware_name);
+
+ devm_kfree(dev, info);
+ put_device(dev);
+}
+EXPORT_SYMBOL_GPL(fpga_image_info_free);
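[Editor's sketch — the firmware name "example.rbf" is made up. The alloc/free pair pins the parent device for the lifetime of the info and frees firmware_name along with it.]

	#include <linux/device.h>
	#include <linux/fpga/fpga-mgr.h>
	#include <linux/slab.h>

	/* Build image info for a partial-reconfiguration firmware load. */
	static struct fpga_image_info *example_make_info(struct device *dev)
	{
		struct fpga_image_info *info;

		info = fpga_image_info_alloc(dev);	/* takes a device reference */
		if (!info)
			return NULL;

		info->flags = FPGA_MGR_PARTIAL_RECONFIG;
		info->firmware_name = devm_kstrdup(dev, "example.rbf", GFP_KERNEL);
		if (!info->firmware_name) {
			fpga_image_info_free(info);	/* also drops the reference */
			return NULL;
		}

		return info;
	}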
+
/*
* Call the low level driver's write_init function. This will do the
* device-specific things to get the FPGA into the state where it is ready to
@@ -137,8 +172,9 @@ static int fpga_mgr_write_complete(struct fpga_manager *mgr,
*
* Return: 0 on success, negative error code otherwise.
*/
-int fpga_mgr_buf_load_sg(struct fpga_manager *mgr, struct fpga_image_info *info,
- struct sg_table *sgt)
+static int fpga_mgr_buf_load_sg(struct fpga_manager *mgr,
+ struct fpga_image_info *info,
+ struct sg_table *sgt)
{
int ret;
@@ -170,7 +206,6 @@ int fpga_mgr_buf_load_sg(struct fpga_manager *mgr, struct fpga_image_info *info,
return fpga_mgr_write_complete(mgr, info);
}
-EXPORT_SYMBOL_GPL(fpga_mgr_buf_load_sg);
static int fpga_mgr_buf_load_mapped(struct fpga_manager *mgr,
struct fpga_image_info *info,
@@ -210,8 +245,9 @@ static int fpga_mgr_buf_load_mapped(struct fpga_manager *mgr,
*
* Return: 0 on success, negative error code otherwise.
*/
-int fpga_mgr_buf_load(struct fpga_manager *mgr, struct fpga_image_info *info,
- const char *buf, size_t count)
+static int fpga_mgr_buf_load(struct fpga_manager *mgr,
+ struct fpga_image_info *info,
+ const char *buf, size_t count)
{
struct page **pages;
struct sg_table sgt;
@@ -266,7 +302,6 @@ int fpga_mgr_buf_load(struct fpga_manager *mgr, struct fpga_image_info *info,
return rc;
}
-EXPORT_SYMBOL_GPL(fpga_mgr_buf_load);
/**
* fpga_mgr_firmware_load - request firmware and load to fpga
@@ -282,9 +317,9 @@ EXPORT_SYMBOL_GPL(fpga_mgr_buf_load);
*
* Return: 0 on success, negative error code otherwise.
*/
-int fpga_mgr_firmware_load(struct fpga_manager *mgr,
- struct fpga_image_info *info,
- const char *image_name)
+static int fpga_mgr_firmware_load(struct fpga_manager *mgr,
+ struct fpga_image_info *info,
+ const char *image_name)
{
struct device *dev = &mgr->dev;
const struct firmware *fw;
@@ -307,7 +342,18 @@ int fpga_mgr_firmware_load(struct fpga_manager *mgr,
return ret;
}
-EXPORT_SYMBOL_GPL(fpga_mgr_firmware_load);
+
+int fpga_mgr_load(struct fpga_manager *mgr, struct fpga_image_info *info)
+{
+ if (info->sgt)
+ return fpga_mgr_buf_load_sg(mgr, info, info->sgt);
+ if (info->buf && info->count)
+ return fpga_mgr_buf_load(mgr, info, info->buf, info->count);
+ if (info->firmware_name)
+ return fpga_mgr_firmware_load(mgr, info, info->firmware_name);
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(fpga_mgr_load);
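[Editor's note with a hedged sketch: fpga_mgr_load() dispatches on whichever image source is populated — sgt first, then buf/count, then firmware_name — and returns -EINVAL when none is set. The buffer path, assuming buf/len hold a complete image:]

	#include <linux/fpga/fpga-mgr.h>

	static int example_load_buf(struct fpga_manager *mgr,
				    struct fpga_image_info *info,
				    const char *buf, size_t len)
	{
		info->buf = buf;
		info->count = len;
		return fpga_mgr_load(mgr, info);	/* -EINVAL if no source set */
	}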
static const char * const state_str[] = {
[FPGA_MGR_STATE_UNKNOWN] = "unknown",
@@ -364,28 +410,17 @@ ATTRIBUTE_GROUPS(fpga_mgr);
static struct fpga_manager *__fpga_mgr_get(struct device *dev)
{
struct fpga_manager *mgr;
- int ret = -ENODEV;
mgr = to_fpga_manager(dev);
- if (!mgr)
- goto err_dev;
-
- /* Get exclusive use of fpga manager */
- if (!mutex_trylock(&mgr->ref_mutex)) {
- ret = -EBUSY;
- goto err_dev;
- }
if (!try_module_get(dev->parent->driver->owner))
- goto err_ll_mod;
+ goto err_dev;
return mgr;
-err_ll_mod:
- mutex_unlock(&mgr->ref_mutex);
err_dev:
put_device(dev);
- return ERR_PTR(ret);
+ return ERR_PTR(-ENODEV);
}
static int fpga_mgr_dev_match(struct device *dev, const void *data)
@@ -394,10 +429,10 @@ static int fpga_mgr_dev_match(struct device *dev, const void *data)
}
/**
- * fpga_mgr_get - get an exclusive reference to a fpga mgr
+ * fpga_mgr_get - get a reference to a fpga mgr
* @dev: parent device that fpga mgr was registered with
*
- * Given a device, get an exclusive reference to a fpga mgr.
+ * Given a device, get a reference to a fpga mgr.
*
* Return: fpga manager struct or IS_ERR() condition containing error code.
*/
@@ -418,10 +453,10 @@ static int fpga_mgr_of_node_match(struct device *dev, const void *data)
}
/**
- * of_fpga_mgr_get - get an exclusive reference to a fpga mgr
+ * of_fpga_mgr_get - get a reference to a fpga mgr
* @node: device node
*
- * Given a device node, get an exclusive reference to a fpga mgr.
+ * Given a device node, get a reference to a fpga mgr.
*
* Return: fpga manager struct or IS_ERR() condition containing error code.
*/
@@ -445,12 +480,41 @@ EXPORT_SYMBOL_GPL(of_fpga_mgr_get);
void fpga_mgr_put(struct fpga_manager *mgr)
{
module_put(mgr->dev.parent->driver->owner);
- mutex_unlock(&mgr->ref_mutex);
put_device(&mgr->dev);
}
EXPORT_SYMBOL_GPL(fpga_mgr_put);
/**
+ * fpga_mgr_lock - Lock FPGA manager for exclusive use
+ * @mgr: fpga manager
+ *
+ * Given a pointer to FPGA Manager (from fpga_mgr_get() or
+ * of_fpga_mgr_put()) attempt to get the mutex.
+ *
+ * Return: 0 for success or -EBUSY
+ */
+int fpga_mgr_lock(struct fpga_manager *mgr)
+{
+ if (!mutex_trylock(&mgr->ref_mutex)) {
+ dev_err(&mgr->dev, "FPGA manager is in use.\n");
+ return -EBUSY;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(fpga_mgr_lock);
+
+/**
+ * fpga_mgr_unlock - Unlock FPGA manager
+ * @mgr: fpga manager
+ */
+void fpga_mgr_unlock(struct fpga_manager *mgr)
+{
+ mutex_unlock(&mgr->ref_mutex);
+}
+EXPORT_SYMBOL_GPL(fpga_mgr_unlock);
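[Editor's sketch, hypothetical names. With locking split out of get/put, the expected sequence becomes get -> lock -> load -> unlock -> put; info would carry the image source set up beforehand.]

	#include <linux/err.h>
	#include <linux/fpga/fpga-mgr.h>
	#include <linux/of.h>

	static int example_program(struct device_node *mgr_node,
				   struct fpga_image_info *info)
	{
		struct fpga_manager *mgr;
		int ret;

		mgr = of_fpga_mgr_get(mgr_node);	/* reference only, no lock */
		if (IS_ERR(mgr))
			return PTR_ERR(mgr);

		ret = fpga_mgr_lock(mgr);		/* -EBUSY if already in use */
		if (ret)
			goto out_put;

		ret = fpga_mgr_load(mgr, info);
		fpga_mgr_unlock(mgr);
	out_put:
		fpga_mgr_put(mgr);
		return ret;
	}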
+
+/**
* fpga_mgr_register - register a low level fpga manager driver
* @dev: fpga manager device from pdev
* @name: fpga manager name
@@ -503,6 +567,7 @@ int fpga_mgr_register(struct device *dev, const char *name,
device_initialize(&mgr->dev);
mgr->dev.class = fpga_mgr_class;
+ mgr->dev.groups = mops->groups;
mgr->dev.parent = dev;
mgr->dev.of_node = dev->of_node;
mgr->dev.id = id;
@@ -578,7 +643,7 @@ static void __exit fpga_mgr_class_exit(void)
ida_destroy(&fpga_mgr_ida);
}
-MODULE_AUTHOR("Alan Tull <atull@opensource.altera.com>");
+MODULE_AUTHOR("Alan Tull <atull@kernel.org>");
MODULE_DESCRIPTION("FPGA manager framework");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/fpga/fpga-region.c b/drivers/fpga/fpga-region.c
index d9ab7c75b14f..edab2a2e03ef 100644
--- a/drivers/fpga/fpga-region.c
+++ b/drivers/fpga/fpga-region.c
@@ -2,6 +2,7 @@
* FPGA Region - Device Tree support for FPGA programming under Linux
*
* Copyright (C) 2013-2016 Altera Corporation
+ * Copyright (C) 2017 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -18,61 +19,30 @@
#include <linux/fpga/fpga-bridge.h>
#include <linux/fpga/fpga-mgr.h>
+#include <linux/fpga/fpga-region.h>
#include <linux/idr.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
-#include <linux/of_platform.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
-/**
- * struct fpga_region - FPGA Region structure
- * @dev: FPGA Region device
- * @mutex: enforces exclusive reference to region
- * @bridge_list: list of FPGA bridges specified in region
- * @info: fpga image specific information
- */
-struct fpga_region {
- struct device dev;
- struct mutex mutex; /* for exclusive reference to region */
- struct list_head bridge_list;
- struct fpga_image_info *info;
-};
-
-#define to_fpga_region(d) container_of(d, struct fpga_region, dev)
-
static DEFINE_IDA(fpga_region_ida);
static struct class *fpga_region_class;
-static const struct of_device_id fpga_region_of_match[] = {
- { .compatible = "fpga-region", },
- {},
-};
-MODULE_DEVICE_TABLE(of, fpga_region_of_match);
-
-static int fpga_region_of_node_match(struct device *dev, const void *data)
-{
- return dev->of_node == data;
-}
-
-/**
- * fpga_region_find - find FPGA region
- * @np: device node of FPGA Region
- * Caller will need to put_device(&region->dev) when done.
- * Returns FPGA Region struct or NULL
- */
-static struct fpga_region *fpga_region_find(struct device_node *np)
+struct fpga_region *fpga_region_class_find(
+ struct device *start, const void *data,
+ int (*match)(struct device *, const void *))
{
struct device *dev;
- dev = class_find_device(fpga_region_class, NULL, np,
- fpga_region_of_node_match);
+ dev = class_find_device(fpga_region_class, start, data, match);
if (!dev)
return NULL;
return to_fpga_region(dev);
}
+EXPORT_SYMBOL_GPL(fpga_region_class_find);
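[Editor's sketch, hypothetical names. fpga_region_class_find() is a thin wrapper around class_find_device(), so any match key works; of-fpga-region below uses an of_node match. Here, a parent-device match instead — the caller must put_device(&region->dev) when done.]

	#include <linux/device.h>
	#include <linux/fpga/fpga-region.h>

	static int example_region_match(struct device *dev, const void *data)
	{
		return dev->parent == data;	/* key: the region's parent device */
	}

	static struct fpga_region *example_find_region(struct device *parent)
	{
		return fpga_region_class_find(NULL, parent, example_region_match);
	}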
/**
* fpga_region_get - get an exclusive reference to a fpga region
@@ -94,15 +64,13 @@ static struct fpga_region *fpga_region_get(struct fpga_region *region)
}
get_device(dev);
- of_node_get(dev->of_node);
if (!try_module_get(dev->parent->driver->owner)) {
- of_node_put(dev->of_node);
put_device(dev);
mutex_unlock(&region->mutex);
return ERR_PTR(-ENODEV);
}
- dev_dbg(&region->dev, "get\n");
+ dev_dbg(dev, "get\n");
return region;
}
@@ -116,403 +84,99 @@ static void fpga_region_put(struct fpga_region *region)
{
struct device *dev = &region->dev;
- dev_dbg(&region->dev, "put\n");
+ dev_dbg(dev, "put\n");
module_put(dev->parent->driver->owner);
- of_node_put(dev->of_node);
put_device(dev);
mutex_unlock(&region->mutex);
}
/**
- * fpga_region_get_manager - get exclusive reference for FPGA manager
- * @region: FPGA region
- *
- * Get FPGA Manager from "fpga-mgr" property or from ancestor region.
- *
- * Caller should call fpga_mgr_put() when done with manager.
- *
- * Return: fpga manager struct or IS_ERR() condition containing error code.
- */
-static struct fpga_manager *fpga_region_get_manager(struct fpga_region *region)
-{
- struct device *dev = &region->dev;
- struct device_node *np = dev->of_node;
- struct device_node *mgr_node;
- struct fpga_manager *mgr;
-
- of_node_get(np);
- while (np) {
- if (of_device_is_compatible(np, "fpga-region")) {
- mgr_node = of_parse_phandle(np, "fpga-mgr", 0);
- if (mgr_node) {
- mgr = of_fpga_mgr_get(mgr_node);
- of_node_put(np);
- return mgr;
- }
- }
- np = of_get_next_parent(np);
- }
- of_node_put(np);
-
- return ERR_PTR(-EINVAL);
-}
-
-/**
- * fpga_region_get_bridges - create a list of bridges
- * @region: FPGA region
- * @overlay: device node of the overlay
- *
- * Create a list of bridges including the parent bridge and the bridges
- * specified by "fpga-bridges" property. Note that the
- * fpga_bridges_enable/disable/put functions are all fine with an empty list
- * if that happens.
- *
- * Caller should call fpga_bridges_put(&region->bridge_list) when
- * done with the bridges.
- *
- * Return 0 for success (even if there are no bridges specified)
- * or -EBUSY if any of the bridges are in use.
- */
-static int fpga_region_get_bridges(struct fpga_region *region,
- struct device_node *overlay)
-{
- struct device *dev = &region->dev;
- struct device_node *region_np = dev->of_node;
- struct device_node *br, *np, *parent_br = NULL;
- int i, ret;
-
- /* If parent is a bridge, add to list */
- ret = fpga_bridge_get_to_list(region_np->parent, region->info,
- &region->bridge_list);
- if (ret == -EBUSY)
- return ret;
-
- if (!ret)
- parent_br = region_np->parent;
-
- /* If overlay has a list of bridges, use it. */
- if (of_parse_phandle(overlay, "fpga-bridges", 0))
- np = overlay;
- else
- np = region_np;
-
- for (i = 0; ; i++) {
- br = of_parse_phandle(np, "fpga-bridges", i);
- if (!br)
- break;
-
- /* If parent bridge is in list, skip it. */
- if (br == parent_br)
- continue;
-
- /* If node is a bridge, get it and add to list */
- ret = fpga_bridge_get_to_list(br, region->info,
- &region->bridge_list);
-
- /* If any of the bridges are in use, give up */
- if (ret == -EBUSY) {
- fpga_bridges_put(&region->bridge_list);
- return -EBUSY;
- }
- }
-
- return 0;
-}
-
-/**
* fpga_region_program_fpga - program FPGA
* @region: FPGA region
- * @firmware_name: name of FPGA image firmware file
- * @overlay: device node of the overlay
- * Program an FPGA using information in the device tree.
- * Function assumes that there is a firmware-name property.
+ * Program an FPGA using fpga image info (region->info).
* Return 0 for success or negative error code.
*/
-static int fpga_region_program_fpga(struct fpga_region *region,
- const char *firmware_name,
- struct device_node *overlay)
+int fpga_region_program_fpga(struct fpga_region *region)
{
- struct fpga_manager *mgr;
+ struct device *dev = &region->dev;
+ struct fpga_image_info *info = region->info;
int ret;
region = fpga_region_get(region);
if (IS_ERR(region)) {
- pr_err("failed to get fpga region\n");
+ dev_err(dev, "failed to get FPGA region\n");
return PTR_ERR(region);
}
- mgr = fpga_region_get_manager(region);
- if (IS_ERR(mgr)) {
- pr_err("failed to get fpga region manager\n");
- ret = PTR_ERR(mgr);
+ ret = fpga_mgr_lock(region->mgr);
+ if (ret) {
+ dev_err(dev, "FPGA manager is busy\n");
goto err_put_region;
}
- ret = fpga_region_get_bridges(region, overlay);
- if (ret) {
- pr_err("failed to get fpga region bridges\n");
- goto err_put_mgr;
+ /*
+	 * In some cases the fpga region struct already holds a list of
+	 * bridges, or the region has no bridges at all.
+ */
+ if (region->get_bridges) {
+ ret = region->get_bridges(region);
+ if (ret) {
+ dev_err(dev, "failed to get fpga region bridges\n");
+ goto err_unlock_mgr;
+ }
}
ret = fpga_bridges_disable(&region->bridge_list);
if (ret) {
- pr_err("failed to disable region bridges\n");
+ dev_err(dev, "failed to disable bridges\n");
goto err_put_br;
}
- ret = fpga_mgr_firmware_load(mgr, region->info, firmware_name);
+ ret = fpga_mgr_load(region->mgr, info);
if (ret) {
- pr_err("failed to load fpga image\n");
+ dev_err(dev, "failed to load FPGA image\n");
goto err_put_br;
}
ret = fpga_bridges_enable(&region->bridge_list);
if (ret) {
- pr_err("failed to enable region bridges\n");
+ dev_err(dev, "failed to enable region bridges\n");
goto err_put_br;
}
- fpga_mgr_put(mgr);
+ fpga_mgr_unlock(region->mgr);
fpga_region_put(region);
return 0;
err_put_br:
- fpga_bridges_put(&region->bridge_list);
-err_put_mgr:
- fpga_mgr_put(mgr);
+ if (region->get_bridges)
+ fpga_bridges_put(&region->bridge_list);
+err_unlock_mgr:
+ fpga_mgr_unlock(region->mgr);
err_put_region:
fpga_region_put(region);
return ret;
}
+EXPORT_SYMBOL_GPL(fpga_region_program_fpga);
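[Editor's sketch, hypothetical name. The sequence above is: take the region, lock the manager, optionally collect bridges, disable them, load, re-enable, then unlock and release. A caller mirrors what the overlay notifier in of-fpga-region.c does: attach info, program, detach on failure.]

	#include <linux/fpga/fpga-region.h>

	static int example_program_region(struct fpga_region *region,
					  struct fpga_image_info *info)
	{
		int ret;

		region->info = info;		/* consumed via fpga_mgr_load() */
		ret = fpga_region_program_fpga(region);
		if (ret)
			region->info = NULL;	/* programming failed; detach info */

		return ret;
	}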
-/**
- * child_regions_with_firmware
- * @overlay: device node of the overlay
- *
- * If the overlay adds child FPGA regions, they are not allowed to have
- * firmware-name property.
- *
- * Return 0 for OK or -EINVAL if child FPGA region adds firmware-name.
- */
-static int child_regions_with_firmware(struct device_node *overlay)
-{
- struct device_node *child_region;
- const char *child_firmware_name;
- int ret = 0;
-
- of_node_get(overlay);
-
- child_region = of_find_matching_node(overlay, fpga_region_of_match);
- while (child_region) {
- if (!of_property_read_string(child_region, "firmware-name",
- &child_firmware_name)) {
- ret = -EINVAL;
- break;
- }
- child_region = of_find_matching_node(child_region,
- fpga_region_of_match);
- }
-
- of_node_put(child_region);
-
- if (ret)
- pr_err("firmware-name not allowed in child FPGA region: %pOF",
- child_region);
-
- return ret;
-}
-
-/**
- * fpga_region_notify_pre_apply - pre-apply overlay notification
- *
- * @region: FPGA region that the overlay was applied to
- * @nd: overlay notification data
- *
- * Called after when an overlay targeted to a FPGA Region is about to be
- * applied. Function will check the properties that will be added to the FPGA
- * region. If the checks pass, it will program the FPGA.
- *
- * The checks are:
- * The overlay must add either firmware-name or external-fpga-config property
- * to the FPGA Region.
- *
- * firmware-name : program the FPGA
- * external-fpga-config : FPGA is already programmed
- * encrypted-fpga-config : FPGA bitstream is encrypted
- *
- * The overlay can add other FPGA regions, but child FPGA regions cannot have a
- * firmware-name property since those regions don't exist yet.
- *
- * If the overlay that breaks the rules, notifier returns an error and the
- * overlay is rejected before it goes into the main tree.
- *
- * Returns 0 for success or negative error code for failure.
- */
-static int fpga_region_notify_pre_apply(struct fpga_region *region,
- struct of_overlay_notify_data *nd)
+int fpga_region_register(struct device *dev, struct fpga_region *region)
{
- const char *firmware_name = NULL;
- struct fpga_image_info *info;
- int ret;
-
- info = devm_kzalloc(&region->dev, sizeof(*info), GFP_KERNEL);
- if (!info)
- return -ENOMEM;
-
- region->info = info;
-
- /* Reject overlay if child FPGA Regions have firmware-name property */
- ret = child_regions_with_firmware(nd->overlay);
- if (ret)
- return ret;
-
- /* Read FPGA region properties from the overlay */
- if (of_property_read_bool(nd->overlay, "partial-fpga-config"))
- info->flags |= FPGA_MGR_PARTIAL_RECONFIG;
-
- if (of_property_read_bool(nd->overlay, "external-fpga-config"))
- info->flags |= FPGA_MGR_EXTERNAL_CONFIG;
-
- if (of_property_read_bool(nd->overlay, "encrypted-fpga-config"))
- info->flags |= FPGA_MGR_ENCRYPTED_BITSTREAM;
-
- of_property_read_string(nd->overlay, "firmware-name", &firmware_name);
-
- of_property_read_u32(nd->overlay, "region-unfreeze-timeout-us",
- &info->enable_timeout_us);
-
- of_property_read_u32(nd->overlay, "region-freeze-timeout-us",
- &info->disable_timeout_us);
-
- of_property_read_u32(nd->overlay, "config-complete-timeout-us",
- &info->config_complete_timeout_us);
-
- /* If FPGA was externally programmed, don't specify firmware */
- if ((info->flags & FPGA_MGR_EXTERNAL_CONFIG) && firmware_name) {
- pr_err("error: specified firmware and external-fpga-config");
- return -EINVAL;
- }
-
- /* FPGA is already configured externally. We're done. */
- if (info->flags & FPGA_MGR_EXTERNAL_CONFIG)
- return 0;
-
- /* If we got this far, we should be programming the FPGA */
- if (!firmware_name) {
- pr_err("should specify firmware-name or external-fpga-config\n");
- return -EINVAL;
- }
-
- return fpga_region_program_fpga(region, firmware_name, nd->overlay);
-}
-
-/**
- * fpga_region_notify_post_remove - post-remove overlay notification
- *
- * @region: FPGA region that was targeted by the overlay that was removed
- * @nd: overlay notification data
- *
- * Called after an overlay has been removed if the overlay's target was a
- * FPGA region.
- */
-static void fpga_region_notify_post_remove(struct fpga_region *region,
- struct of_overlay_notify_data *nd)
-{
- fpga_bridges_disable(&region->bridge_list);
- fpga_bridges_put(&region->bridge_list);
- devm_kfree(&region->dev, region->info);
- region->info = NULL;
-}
-
-/**
- * of_fpga_region_notify - reconfig notifier for dynamic DT changes
- * @nb: notifier block
- * @action: notifier action
- * @arg: reconfig data
- *
- * This notifier handles programming a FPGA when a "firmware-name" property is
- * added to a fpga-region.
- *
- * Returns NOTIFY_OK or error if FPGA programming fails.
- */
-static int of_fpga_region_notify(struct notifier_block *nb,
- unsigned long action, void *arg)
-{
- struct of_overlay_notify_data *nd = arg;
- struct fpga_region *region;
- int ret;
-
- switch (action) {
- case OF_OVERLAY_PRE_APPLY:
- pr_debug("%s OF_OVERLAY_PRE_APPLY\n", __func__);
- break;
- case OF_OVERLAY_POST_APPLY:
- pr_debug("%s OF_OVERLAY_POST_APPLY\n", __func__);
- return NOTIFY_OK; /* not for us */
- case OF_OVERLAY_PRE_REMOVE:
- pr_debug("%s OF_OVERLAY_PRE_REMOVE\n", __func__);
- return NOTIFY_OK; /* not for us */
- case OF_OVERLAY_POST_REMOVE:
- pr_debug("%s OF_OVERLAY_POST_REMOVE\n", __func__);
- break;
- default: /* should not happen */
- return NOTIFY_OK;
- }
-
- region = fpga_region_find(nd->target);
- if (!region)
- return NOTIFY_OK;
-
- ret = 0;
- switch (action) {
- case OF_OVERLAY_PRE_APPLY:
- ret = fpga_region_notify_pre_apply(region, nd);
- break;
-
- case OF_OVERLAY_POST_REMOVE:
- fpga_region_notify_post_remove(region, nd);
- break;
- }
-
- put_device(&region->dev);
-
- if (ret)
- return notifier_from_errno(ret);
-
- return NOTIFY_OK;
-}
-
-static struct notifier_block fpga_region_of_nb = {
- .notifier_call = of_fpga_region_notify,
-};
-
-static int fpga_region_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct device_node *np = dev->of_node;
- struct fpga_region *region;
int id, ret = 0;
- region = kzalloc(sizeof(*region), GFP_KERNEL);
- if (!region)
- return -ENOMEM;
-
id = ida_simple_get(&fpga_region_ida, 0, 0, GFP_KERNEL);
- if (id < 0) {
- ret = id;
- goto err_kfree;
- }
+ if (id < 0)
+ return id;
mutex_init(&region->mutex);
INIT_LIST_HEAD(&region->bridge_list);
-
device_initialize(&region->dev);
+ region->dev.groups = region->groups;
region->dev.class = fpga_region_class;
region->dev.parent = dev;
- region->dev.of_node = np;
+ region->dev.of_node = dev->of_node;
region->dev.id = id;
dev_set_drvdata(dev, region);
@@ -524,44 +188,27 @@ static int fpga_region_probe(struct platform_device *pdev)
if (ret)
goto err_remove;
- of_platform_populate(np, fpga_region_of_match, NULL, &region->dev);
-
- dev_info(dev, "FPGA Region probed\n");
-
return 0;
err_remove:
ida_simple_remove(&fpga_region_ida, id);
-err_kfree:
- kfree(region);
-
return ret;
}
+EXPORT_SYMBOL_GPL(fpga_region_register);
-static int fpga_region_remove(struct platform_device *pdev)
+int fpga_region_unregister(struct fpga_region *region)
{
- struct fpga_region *region = platform_get_drvdata(pdev);
-
device_unregister(&region->dev);
return 0;
}
-
-static struct platform_driver fpga_region_driver = {
- .probe = fpga_region_probe,
- .remove = fpga_region_remove,
- .driver = {
- .name = "fpga-region",
- .of_match_table = of_match_ptr(fpga_region_of_match),
- },
-};
+EXPORT_SYMBOL_GPL(fpga_region_unregister);
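[Editor's sketch, hypothetical names. For a region without device tree backing, registration reduces to allocating the struct, setting mgr (and optionally get_bridges), and calling fpga_region_register(); leaving get_bridges NULL means programming proceeds with an empty bridge list.]

	#include <linux/device.h>
	#include <linux/fpga/fpga-region.h>
	#include <linux/slab.h>

	static int example_register_region(struct device *dev,
					   struct fpga_manager *mgr)
	{
		struct fpga_region *region;

		region = devm_kzalloc(dev, sizeof(*region), GFP_KERNEL);
		if (!region)
			return -ENOMEM;

		region->mgr = mgr;
		/* get_bridges left NULL: program with an empty bridge list */

		return fpga_region_register(dev, region);
	}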
static void fpga_region_dev_release(struct device *dev)
{
struct fpga_region *region = to_fpga_region(dev);
ida_simple_remove(&fpga_region_ida, region->dev.id);
- kfree(region);
}
/**
@@ -570,36 +217,17 @@ static void fpga_region_dev_release(struct device *dev)
*/
static int __init fpga_region_init(void)
{
- int ret;
-
fpga_region_class = class_create(THIS_MODULE, "fpga_region");
if (IS_ERR(fpga_region_class))
return PTR_ERR(fpga_region_class);
fpga_region_class->dev_release = fpga_region_dev_release;
- ret = of_overlay_notifier_register(&fpga_region_of_nb);
- if (ret)
- goto err_class;
-
- ret = platform_driver_register(&fpga_region_driver);
- if (ret)
- goto err_plat;
-
return 0;
-
-err_plat:
- of_overlay_notifier_unregister(&fpga_region_of_nb);
-err_class:
- class_destroy(fpga_region_class);
- ida_destroy(&fpga_region_ida);
- return ret;
}
static void __exit fpga_region_exit(void)
{
- platform_driver_unregister(&fpga_region_driver);
- of_overlay_notifier_unregister(&fpga_region_of_nb);
class_destroy(fpga_region_class);
ida_destroy(&fpga_region_ida);
}
@@ -608,5 +236,5 @@ subsys_initcall(fpga_region_init);
module_exit(fpga_region_exit);
MODULE_DESCRIPTION("FPGA Region");
-MODULE_AUTHOR("Alan Tull <atull@opensource.altera.com>");
+MODULE_AUTHOR("Alan Tull <atull@kernel.org>");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/fpga/of-fpga-region.c b/drivers/fpga/of-fpga-region.c
new file mode 100644
index 000000000000..119ff75522f1
--- /dev/null
+++ b/drivers/fpga/of-fpga-region.c
@@ -0,0 +1,504 @@
+/*
+ * FPGA Region - Device Tree support for FPGA programming under Linux
+ *
+ * Copyright (C) 2013-2016 Altera Corporation
+ * Copyright (C) 2017 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/fpga/fpga-bridge.h>
+#include <linux/fpga/fpga-mgr.h>
+#include <linux/fpga/fpga-region.h>
+#include <linux/idr.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+static const struct of_device_id fpga_region_of_match[] = {
+ { .compatible = "fpga-region", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, fpga_region_of_match);
+
+static int fpga_region_of_node_match(struct device *dev, const void *data)
+{
+ return dev->of_node == data;
+}
+
+/**
+ * of_fpga_region_find - find FPGA region
+ * @np: device node of FPGA Region
+ *
+ * Caller will need to put_device(&region->dev) when done.
+ *
+ * Returns FPGA Region struct or NULL
+ */
+static struct fpga_region *of_fpga_region_find(struct device_node *np)
+{
+ return fpga_region_class_find(NULL, np, fpga_region_of_node_match);
+}
+
+/**
+ * of_fpga_region_get_mgr - get reference for FPGA manager
+ * @np: device node of FPGA region
+ *
+ * Get FPGA Manager from "fpga-mgr" property or from ancestor region.
+ *
+ * Caller should call fpga_mgr_put() when done with manager.
+ *
+ * Return: fpga manager struct or IS_ERR() condition containing error code.
+ */
+static struct fpga_manager *of_fpga_region_get_mgr(struct device_node *np)
+{
+ struct device_node *mgr_node;
+ struct fpga_manager *mgr;
+
+ of_node_get(np);
+ while (np) {
+ if (of_device_is_compatible(np, "fpga-region")) {
+ mgr_node = of_parse_phandle(np, "fpga-mgr", 0);
+ if (mgr_node) {
+ mgr = of_fpga_mgr_get(mgr_node);
+ of_node_put(mgr_node);
+ of_node_put(np);
+ return mgr;
+ }
+ }
+ np = of_get_next_parent(np);
+ }
+ of_node_put(np);
+
+ return ERR_PTR(-EINVAL);
+}
+
+/**
+ * of_fpga_region_get_bridges - create a list of bridges
+ * @region: FPGA region
+ *
+ * Create a list of bridges including the parent bridge and the bridges
+ * specified by the "fpga-bridges" property. Note that the
+ * fpga_bridges_enable/disable/put functions are all fine with an empty
+ * list, so it is not an error if no bridges are specified.
+ *
+ * Caller should call fpga_bridges_put(&region->bridge_list) when
+ * done with the bridges.
+ *
+ * Return 0 for success (even if there are no bridges specified)
+ * or -EBUSY if any of the bridges are in use.
+ */
+static int of_fpga_region_get_bridges(struct fpga_region *region)
+{
+ struct device *dev = &region->dev;
+ struct device_node *region_np = dev->of_node;
+ struct fpga_image_info *info = region->info;
+ struct device_node *br, *np, *parent_br = NULL;
+ int i, ret;
+
+ /* If parent is a bridge, add to list */
+ ret = of_fpga_bridge_get_to_list(region_np->parent, info,
+ &region->bridge_list);
+
+ /* -EBUSY means parent is a bridge that is under use. Give up. */
+ if (ret == -EBUSY)
+ return ret;
+
+ /* Zero return code means parent was a bridge and was added to list. */
+ if (!ret)
+ parent_br = region_np->parent;
+
+ /* If overlay has a list of bridges, use it. */
+ br = of_parse_phandle(info->overlay, "fpga-bridges", 0);
+ if (br) {
+ of_node_put(br);
+ np = info->overlay;
+ } else {
+ np = region_np;
+ }
+
+ for (i = 0; ; i++) {
+ br = of_parse_phandle(np, "fpga-bridges", i);
+ if (!br)
+ break;
+
+ /* If parent bridge is in list, skip it. */
+ if (br == parent_br) {
+ of_node_put(br);
+ continue;
+ }
+
+ /* If node is a bridge, get it and add to list */
+ ret = of_fpga_bridge_get_to_list(br, info,
+ &region->bridge_list);
+ of_node_put(br);
+
+ /* If any of the bridges are in use, give up */
+ if (ret == -EBUSY) {
+ fpga_bridges_put(&region->bridge_list);
+ return -EBUSY;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * child_regions_with_firmware - check child FPGA regions for firmware-name
+ * @overlay: device node of the overlay
+ *
+ * If the overlay adds child FPGA regions, they are not allowed to have
+ * firmware-name property.
+ *
+ * Return 0 for OK or -EINVAL if child FPGA region adds firmware-name.
+ */
+static int child_regions_with_firmware(struct device_node *overlay)
+{
+ struct device_node *child_region;
+ const char *child_firmware_name;
+ int ret = 0;
+
+ of_node_get(overlay);
+
+ child_region = of_find_matching_node(overlay, fpga_region_of_match);
+ while (child_region) {
+ if (!of_property_read_string(child_region, "firmware-name",
+ &child_firmware_name)) {
+ ret = -EINVAL;
+ break;
+ }
+ child_region = of_find_matching_node(child_region,
+ fpga_region_of_match);
+ }
+
+ of_node_put(child_region);
+
+ if (ret)
+ pr_err("firmware-name not allowed in child FPGA region: %pOF",
+ child_region);
+
+ return ret;
+}
+
+/**
+ * of_fpga_region_parse_ov - parse and check overlay applied to region
+ *
+ * @region: FPGA region
+ * @overlay: overlay applied to the FPGA region
+ *
+ * Given an overlay applied to a FPGA region, parse the FPGA image specific
+ * info in the overlay and do some checking.
+ *
+ * Returns:
+ * NULL if overlay doesn't direct us to program the FPGA.
+ * fpga_image_info struct if there is an image to program.
+ * error code for invalid overlay.
+ */
+static struct fpga_image_info *of_fpga_region_parse_ov(
+ struct fpga_region *region,
+ struct device_node *overlay)
+{
+ struct device *dev = &region->dev;
+ struct fpga_image_info *info;
+ const char *firmware_name;
+ int ret;
+
+ if (region->info) {
+ dev_err(dev, "Region already has overlay applied.\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ /*
+ * Reject overlay if child FPGA Regions added in the overlay have
+ * firmware-name property (would mean that an FPGA region that has
+ * not been added to the live tree yet is doing FPGA programming).
+ */
+ ret = child_regions_with_firmware(overlay);
+ if (ret)
+ return ERR_PTR(ret);
+
+ info = fpga_image_info_alloc(dev);
+ if (!info)
+ return ERR_PTR(-ENOMEM);
+
+ info->overlay = overlay;
+
+ /* Read FPGA region properties from the overlay */
+ if (of_property_read_bool(overlay, "partial-fpga-config"))
+ info->flags |= FPGA_MGR_PARTIAL_RECONFIG;
+
+ if (of_property_read_bool(overlay, "external-fpga-config"))
+ info->flags |= FPGA_MGR_EXTERNAL_CONFIG;
+
+ if (of_property_read_bool(overlay, "encrypted-fpga-config"))
+ info->flags |= FPGA_MGR_ENCRYPTED_BITSTREAM;
+
+ if (!of_property_read_string(overlay, "firmware-name",
+ &firmware_name)) {
+ info->firmware_name = devm_kstrdup(dev, firmware_name,
+ GFP_KERNEL);
+ if (!info->firmware_name)
+ return ERR_PTR(-ENOMEM);
+ }
+
+ of_property_read_u32(overlay, "region-unfreeze-timeout-us",
+ &info->enable_timeout_us);
+
+ of_property_read_u32(overlay, "region-freeze-timeout-us",
+ &info->disable_timeout_us);
+
+ of_property_read_u32(overlay, "config-complete-timeout-us",
+ &info->config_complete_timeout_us);
+
+ /* If overlay is not programming the FPGA, don't need FPGA image info */
+ if (!info->firmware_name) {
+ ret = 0;
+ goto ret_no_info;
+ }
+
+ /*
+ * If overlay informs us FPGA was externally programmed, specifying
+ * firmware here would be ambiguous.
+ */
+ if (info->flags & FPGA_MGR_EXTERNAL_CONFIG) {
+ dev_err(dev, "error: specified firmware and external-fpga-config");
+ ret = -EINVAL;
+ goto ret_no_info;
+ }
+
+ return info;
+ret_no_info:
+ fpga_image_info_free(info);
+ return ERR_PTR(ret);
+}
+
+/**
+ * of_fpga_region_notify_pre_apply - pre-apply overlay notification
+ *
+ * @region: FPGA region that the overlay was applied to
+ * @nd: overlay notification data
+ *
+ * Called when an overlay targeted to a FPGA Region is about to be applied.
+ * Parses the overlay for properties that influence how the FPGA will be
+ * programmed and does some checking. If the checks pass, programs the FPGA.
+ * If the checks fail, overlay is rejected and does not get added to the
+ * live tree.
+ *
+ * Returns 0 for success or negative error code for failure.
+ */
+static int of_fpga_region_notify_pre_apply(struct fpga_region *region,
+ struct of_overlay_notify_data *nd)
+{
+ struct device *dev = &region->dev;
+ struct fpga_image_info *info;
+ int ret;
+
+ info = of_fpga_region_parse_ov(region, nd->overlay);
+ if (IS_ERR(info))
+ return PTR_ERR(info);
+
+ /* If overlay doesn't program the FPGA, accept it anyway. */
+ if (!info)
+ return 0;
+
+ if (region->info) {
+ dev_err(dev, "Region already has overlay applied.\n");
+ return -EINVAL;
+ }
+
+ region->info = info;
+ ret = fpga_region_program_fpga(region);
+ if (ret) {
+ /* error; reject overlay */
+ fpga_image_info_free(info);
+ region->info = NULL;
+ }
+
+ return ret;
+}
+
+/**
+ * of_fpga_region_notify_post_remove - post-remove overlay notification
+ *
+ * @region: FPGA region that was targeted by the overlay that was removed
+ * @nd: overlay notification data
+ *
+ * Called after an overlay has been removed if the overlay's target was a
+ * FPGA region.
+ */
+static void of_fpga_region_notify_post_remove(struct fpga_region *region,
+ struct of_overlay_notify_data *nd)
+{
+ fpga_bridges_disable(&region->bridge_list);
+ fpga_bridges_put(&region->bridge_list);
+ fpga_image_info_free(region->info);
+ region->info = NULL;
+}
+
+/**
+ * of_fpga_region_notify - reconfig notifier for dynamic DT changes
+ * @nb: notifier block
+ * @action: notifier action
+ * @arg: reconfig data
+ *
+ * This notifier handles programming a FPGA when a "firmware-name" property is
+ * added to a fpga-region.
+ *
+ * Returns NOTIFY_OK or error if FPGA programming fails.
+ */
+static int of_fpga_region_notify(struct notifier_block *nb,
+ unsigned long action, void *arg)
+{
+ struct of_overlay_notify_data *nd = arg;
+ struct fpga_region *region;
+ int ret;
+
+ switch (action) {
+ case OF_OVERLAY_PRE_APPLY:
+ pr_debug("%s OF_OVERLAY_PRE_APPLY\n", __func__);
+ break;
+ case OF_OVERLAY_POST_APPLY:
+ pr_debug("%s OF_OVERLAY_POST_APPLY\n", __func__);
+ return NOTIFY_OK; /* not for us */
+ case OF_OVERLAY_PRE_REMOVE:
+ pr_debug("%s OF_OVERLAY_PRE_REMOVE\n", __func__);
+ return NOTIFY_OK; /* not for us */
+ case OF_OVERLAY_POST_REMOVE:
+ pr_debug("%s OF_OVERLAY_POST_REMOVE\n", __func__);
+ break;
+ default: /* should not happen */
+ return NOTIFY_OK;
+ }
+
+ region = of_fpga_region_find(nd->target);
+ if (!region)
+ return NOTIFY_OK;
+
+ ret = 0;
+ switch (action) {
+ case OF_OVERLAY_PRE_APPLY:
+ ret = of_fpga_region_notify_pre_apply(region, nd);
+ break;
+
+ case OF_OVERLAY_POST_REMOVE:
+ of_fpga_region_notify_post_remove(region, nd);
+ break;
+ }
+
+ put_device(&region->dev);
+
+ if (ret)
+ return notifier_from_errno(ret);
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block fpga_region_of_nb = {
+ .notifier_call = of_fpga_region_notify,
+};
+
+static int of_fpga_region_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct fpga_region *region;
+ struct fpga_manager *mgr;
+ int ret;
+
+ /* Find the FPGA mgr specified by region or parent region. */
+ mgr = of_fpga_region_get_mgr(np);
+ if (IS_ERR(mgr))
+ return -EPROBE_DEFER;
+
+ region = devm_kzalloc(dev, sizeof(*region), GFP_KERNEL);
+ if (!region) {
+ ret = -ENOMEM;
+ goto eprobe_mgr_put;
+ }
+
+ region->mgr = mgr;
+
+ /* Specify how to get bridges for this type of region. */
+ region->get_bridges = of_fpga_region_get_bridges;
+
+ ret = fpga_region_register(dev, region);
+ if (ret)
+ goto eprobe_mgr_put;
+
+ of_platform_populate(np, fpga_region_of_match, NULL, &region->dev);
+
+ dev_info(dev, "FPGA Region probed\n");
+
+ return 0;
+
+eprobe_mgr_put:
+ fpga_mgr_put(mgr);
+ return ret;
+}
+
+static int of_fpga_region_remove(struct platform_device *pdev)
+{
+ struct fpga_region *region = platform_get_drvdata(pdev);
+
+ fpga_region_unregister(region);
+ fpga_mgr_put(region->mgr);
+
+ return 0;
+}
+
+static struct platform_driver of_fpga_region_driver = {
+ .probe = of_fpga_region_probe,
+ .remove = of_fpga_region_remove,
+ .driver = {
+ .name = "of-fpga-region",
+ .of_match_table = of_match_ptr(fpga_region_of_match),
+ },
+};
+
+/**
+ * of_fpga_region_init - init function for the of-fpga-region driver
+ * Registers the overlay reconfig notifier and the of-fpga-region
+ * platform driver.
+ */
+static int __init of_fpga_region_init(void)
+{
+ int ret;
+
+ ret = of_overlay_notifier_register(&fpga_region_of_nb);
+ if (ret)
+ return ret;
+
+ ret = platform_driver_register(&of_fpga_region_driver);
+ if (ret)
+ goto err_plat;
+
+ return 0;
+
+err_plat:
+ of_overlay_notifier_unregister(&fpga_region_of_nb);
+ return ret;
+}
+
+static void __exit of_fpga_region_exit(void)
+{
+ platform_driver_unregister(&of_fpga_region_driver);
+ of_overlay_notifier_unregister(&fpga_region_of_nb);
+}
+
+subsys_initcall(of_fpga_region_init);
+module_exit(of_fpga_region_exit);
+
+MODULE_DESCRIPTION("FPGA Region");
+MODULE_AUTHOR("Alan Tull <atull@kernel.org>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/fpga/socfpga-a10.c b/drivers/fpga/socfpga-a10.c
index f8770af0f6b5..a46e343a5b72 100644
--- a/drivers/fpga/socfpga-a10.c
+++ b/drivers/fpga/socfpga-a10.c
@@ -519,8 +519,14 @@ static int socfpga_a10_fpga_probe(struct platform_device *pdev)
return -EBUSY;
}
- return fpga_mgr_register(dev, "SoCFPGA Arria10 FPGA Manager",
+ ret = fpga_mgr_register(dev, "SoCFPGA Arria10 FPGA Manager",
&socfpga_a10_fpga_mgr_ops, priv);
+ if (ret) {
+ clk_disable_unprepare(priv->clk);
+ return ret;
+ }
+
+ return 0;
}
static int socfpga_a10_fpga_remove(struct platform_device *pdev)
diff --git a/drivers/fsi/Kconfig b/drivers/fsi/Kconfig
index 6821ed0cd5e8..513e35173aaa 100644
--- a/drivers/fsi/Kconfig
+++ b/drivers/fsi/Kconfig
@@ -2,9 +2,7 @@
# FSI subsystem
#
-menu "FSI support"
-
-config FSI
+menuconfig FSI
tristate "FSI support"
select CRC4
---help---
@@ -34,5 +32,3 @@ config FSI_SCOM
This option enables an FSI based SCOM device driver.
endif
-
-endmenu
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
index 8267439dd1ee..fe96aab9e794 100644
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -49,9 +49,6 @@ struct hv_context hv_context = {
*/
int hv_init(void)
{
- if (!hv_is_hypercall_page_setup())
- return -ENOTSUPP;
-
hv_context.cpu_context = alloc_percpu(struct hv_per_cpu_context);
if (!hv_context.cpu_context)
return -ENOMEM;
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 610223f0e945..bc65c4d79c1f 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -37,7 +37,6 @@
#include <linux/sched/task_stack.h>
#include <asm/hyperv.h>
-#include <asm/hypervisor.h>
#include <asm/mshyperv.h>
#include <linux/notifier.h>
#include <linux/ptrace.h>
@@ -1053,7 +1052,7 @@ static int vmbus_bus_init(void)
* Initialize the per-cpu interrupt state and
* connect to the host.
*/
- ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/hyperv:online",
+ ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "hyperv/vmbus:online",
hv_synic_init, hv_synic_cleanup);
if (ret < 0)
goto err_alloc;
@@ -1193,7 +1192,7 @@ static ssize_t out_mask_show(const struct vmbus_channel *channel, char *buf)
return sprintf(buf, "%u\n", rbi->ring_buffer->interrupt_mask);
}
-VMBUS_CHAN_ATTR_RO(out_mask);
+static VMBUS_CHAN_ATTR_RO(out_mask);
static ssize_t in_mask_show(const struct vmbus_channel *channel, char *buf)
{
@@ -1201,7 +1200,7 @@ static ssize_t in_mask_show(const struct vmbus_channel *channel, char *buf)
return sprintf(buf, "%u\n", rbi->ring_buffer->interrupt_mask);
}
-VMBUS_CHAN_ATTR_RO(in_mask);
+static VMBUS_CHAN_ATTR_RO(in_mask);
static ssize_t read_avail_show(const struct vmbus_channel *channel, char *buf)
{
@@ -1209,7 +1208,7 @@ static ssize_t read_avail_show(const struct vmbus_channel *channel, char *buf)
return sprintf(buf, "%u\n", hv_get_bytes_to_read(rbi));
}
-VMBUS_CHAN_ATTR_RO(read_avail);
+static VMBUS_CHAN_ATTR_RO(read_avail);
static ssize_t write_avail_show(const struct vmbus_channel *channel, char *buf)
{
@@ -1217,13 +1216,13 @@ static ssize_t write_avail_show(const struct vmbus_channel *channel, char *buf)
return sprintf(buf, "%u\n", hv_get_bytes_to_write(rbi));
}
-VMBUS_CHAN_ATTR_RO(write_avail);
+static VMBUS_CHAN_ATTR_RO(write_avail);
static ssize_t show_target_cpu(const struct vmbus_channel *channel, char *buf)
{
return sprintf(buf, "%u\n", channel->target_cpu);
}
-VMBUS_CHAN_ATTR(cpu, S_IRUGO, show_target_cpu, NULL);
+static VMBUS_CHAN_ATTR(cpu, S_IRUGO, show_target_cpu, NULL);
static ssize_t channel_pending_show(const struct vmbus_channel *channel,
char *buf)
@@ -1232,7 +1231,7 @@ static ssize_t channel_pending_show(const struct vmbus_channel *channel,
channel_pending(channel,
vmbus_connection.monitor_pages[1]));
}
-VMBUS_CHAN_ATTR(pending, S_IRUGO, channel_pending_show, NULL);
+static VMBUS_CHAN_ATTR(pending, S_IRUGO, channel_pending_show, NULL);
static ssize_t channel_latency_show(const struct vmbus_channel *channel,
char *buf)
@@ -1241,19 +1240,34 @@ static ssize_t channel_latency_show(const struct vmbus_channel *channel,
channel_latency(channel,
vmbus_connection.monitor_pages[1]));
}
-VMBUS_CHAN_ATTR(latency, S_IRUGO, channel_latency_show, NULL);
+static VMBUS_CHAN_ATTR(latency, S_IRUGO, channel_latency_show, NULL);
static ssize_t channel_interrupts_show(const struct vmbus_channel *channel, char *buf)
{
return sprintf(buf, "%llu\n", channel->interrupts);
}
-VMBUS_CHAN_ATTR(interrupts, S_IRUGO, channel_interrupts_show, NULL);
+static VMBUS_CHAN_ATTR(interrupts, S_IRUGO, channel_interrupts_show, NULL);
static ssize_t channel_events_show(const struct vmbus_channel *channel, char *buf)
{
return sprintf(buf, "%llu\n", channel->sig_events);
}
-VMBUS_CHAN_ATTR(events, S_IRUGO, channel_events_show, NULL);
+static VMBUS_CHAN_ATTR(events, S_IRUGO, channel_events_show, NULL);
+
+static ssize_t subchannel_monitor_id_show(const struct vmbus_channel *channel,
+ char *buf)
+{
+ return sprintf(buf, "%u\n", channel->offermsg.monitorid);
+}
+static VMBUS_CHAN_ATTR(monitor_id, S_IRUGO, subchannel_monitor_id_show, NULL);
+
+static ssize_t subchannel_id_show(const struct vmbus_channel *channel,
+ char *buf)
+{
+ return sprintf(buf, "%u\n",
+ channel->offermsg.offer.sub_channel_index);
+}
+static VMBUS_CHAN_ATTR_RO(subchannel_id);
static struct attribute *vmbus_chan_attrs[] = {
&chan_attr_out_mask.attr,
@@ -1265,6 +1279,8 @@ static struct attribute *vmbus_chan_attrs[] = {
&chan_attr_latency.attr,
&chan_attr_interrupts.attr,
&chan_attr_events.attr,
+ &chan_attr_monitor_id.attr,
+ &chan_attr_subchannel_id.attr,
NULL
};
@@ -1717,7 +1733,7 @@ static int __init hv_acpi_init(void)
{
int ret, t;
- if (x86_hyper_type != X86_HYPER_MS_HYPERV)
+ if (!hv_is_hyperv_initialized())
return -ENODEV;
init_completion(&probe_event);
diff --git a/drivers/hwtracing/coresight/coresight-dynamic-replicator.c b/drivers/hwtracing/coresight/coresight-dynamic-replicator.c
index 8f4357e2626c..043da86b0fe9 100644
--- a/drivers/hwtracing/coresight/coresight-dynamic-replicator.c
+++ b/drivers/hwtracing/coresight/coresight-dynamic-replicator.c
@@ -163,10 +163,8 @@ static int replicator_probe(struct amba_device *adev, const struct amba_id *id)
desc.dev = &adev->dev;
desc.groups = replicator_groups;
drvdata->csdev = coresight_register(&desc);
- if (IS_ERR(drvdata->csdev))
- return PTR_ERR(drvdata->csdev);
- return 0;
+ return PTR_ERR_OR_ZERO(drvdata->csdev);
}
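[Editor's note: PTR_ERR_OR_ZERO() folds the removed two-branch return into a single expression with no behavior change; the same conversion repeats in the funnel and tpiu hunks below. Open-coded equivalent:]

	#include <linux/err.h>

	static inline int example_ptr_err_or_zero(const void *p)
	{
		if (IS_ERR(p))
			return PTR_ERR(p);
		return 0;
	}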
#ifdef CONFIG_PM
diff --git a/drivers/hwtracing/coresight/coresight-etb10.c b/drivers/hwtracing/coresight/coresight-etb10.c
index e03e58933141..580cd381adf3 100644
--- a/drivers/hwtracing/coresight/coresight-etb10.c
+++ b/drivers/hwtracing/coresight/coresight-etb10.c
@@ -33,7 +33,6 @@
#include <linux/mm.h>
#include <linux/perf_event.h>
-#include <asm/local.h>
#include "coresight-priv.h"
diff --git a/drivers/hwtracing/coresight/coresight-funnel.c b/drivers/hwtracing/coresight/coresight-funnel.c
index fd3c396717f6..9f8ac0bef853 100644
--- a/drivers/hwtracing/coresight/coresight-funnel.c
+++ b/drivers/hwtracing/coresight/coresight-funnel.c
@@ -214,10 +214,8 @@ static int funnel_probe(struct amba_device *adev, const struct amba_id *id)
desc.dev = dev;
desc.groups = coresight_funnel_groups;
drvdata->csdev = coresight_register(&desc);
- if (IS_ERR(drvdata->csdev))
- return PTR_ERR(drvdata->csdev);
- return 0;
+ return PTR_ERR_OR_ZERO(drvdata->csdev);
}
#ifdef CONFIG_PM
diff --git a/drivers/hwtracing/coresight/coresight-tpiu.c b/drivers/hwtracing/coresight/coresight-tpiu.c
index bef49a3a5ca7..805f7c2210fe 100644
--- a/drivers/hwtracing/coresight/coresight-tpiu.c
+++ b/drivers/hwtracing/coresight/coresight-tpiu.c
@@ -46,8 +46,11 @@
#define TPIU_ITATBCTR0 0xef8
/** register definition **/
+/* FFSR - 0x300 */
+#define FFSR_FT_STOPPED BIT(1)
/* FFCR - 0x304 */
#define FFCR_FON_MAN BIT(6)
+#define FFCR_STOP_FI BIT(12)
/**
* @base: memory mapped base address for this component.
@@ -85,10 +88,14 @@ static void tpiu_disable_hw(struct tpiu_drvdata *drvdata)
{
CS_UNLOCK(drvdata->base);
- /* Clear formatter controle reg. */
- writel_relaxed(0x0, drvdata->base + TPIU_FFCR);
+ /* Clear formatter and stop on flush */
+ writel_relaxed(FFCR_STOP_FI, drvdata->base + TPIU_FFCR);
/* Generate manual flush */
- writel_relaxed(FFCR_FON_MAN, drvdata->base + TPIU_FFCR);
+ writel_relaxed(FFCR_STOP_FI | FFCR_FON_MAN, drvdata->base + TPIU_FFCR);
+ /* Wait for flush to complete */
+ coresight_timeout(drvdata->base, TPIU_FFCR, FFCR_FON_MAN, 0);
+ /* Wait for formatter to stop */
+ coresight_timeout(drvdata->base, TPIU_FFSR, FFSR_FT_STOPPED, 1);
CS_LOCK(drvdata->base);
}
@@ -160,10 +167,8 @@ static int tpiu_probe(struct amba_device *adev, const struct amba_id *id)
desc.pdata = pdata;
desc.dev = dev;
drvdata->csdev = coresight_register(&desc);
- if (IS_ERR(drvdata->csdev))
- return PTR_ERR(drvdata->csdev);
- return 0;
+ return PTR_ERR_OR_ZERO(drvdata->csdev);
}
#ifdef CONFIG_PM
diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c
index b8091bef21dc..389c4baeca9d 100644
--- a/drivers/hwtracing/coresight/coresight.c
+++ b/drivers/hwtracing/coresight/coresight.c
@@ -843,32 +843,17 @@ static void coresight_fixup_orphan_conns(struct coresight_device *csdev)
}
-static int coresight_name_match(struct device *dev, void *data)
-{
- char *to_match;
- struct coresight_device *i_csdev;
-
- to_match = data;
- i_csdev = to_coresight_device(dev);
-
- if (to_match && !strcmp(to_match, dev_name(&i_csdev->dev)))
- return 1;
-
- return 0;
-}
-
static void coresight_fixup_device_conns(struct coresight_device *csdev)
{
int i;
- struct device *dev = NULL;
- struct coresight_connection *conn;
for (i = 0; i < csdev->nr_outport; i++) {
- conn = &csdev->conns[i];
- dev = bus_find_device(&coresight_bustype, NULL,
- (void *)conn->child_name,
- coresight_name_match);
+ struct coresight_connection *conn = &csdev->conns[i];
+ struct device *dev = NULL;
+ if (conn->child_name)
+ dev = bus_find_device_by_name(&coresight_bustype, NULL,
+ conn->child_name);
if (dev) {
conn->child_dev = to_coresight_device(dev);
/* and put reference from 'bus_find_device()' */
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 7c0fa24f9067..6722073e339b 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -53,7 +53,7 @@ config AD525X_DPOT_SPI
config ATMEL_TCLIB
bool "Atmel AT32/AT91 Timer/Counter Library"
- depends on (AVR32 || ARCH_AT91)
+ depends on ARCH_AT91
help
Select this if you want a library to allocate the Timer/Counter
blocks found on many Atmel processors. This facilitates using
@@ -192,7 +192,7 @@ config ICS932S401
config ATMEL_SSC
tristate "Device driver for Atmel SSC peripheral"
- depends on HAS_IOMEM && (AVR32 || ARCH_AT91 || COMPILE_TEST)
+ depends on HAS_IOMEM && (ARCH_AT91 || COMPILE_TEST)
---help---
This option enables device driver support for Atmel Synchronized
Serial Communication peripheral (SSC).
diff --git a/drivers/misc/ad525x_dpot.c b/drivers/misc/ad525x_dpot.c
index fe1672747bc1..bc591b7168db 100644
--- a/drivers/misc/ad525x_dpot.c
+++ b/drivers/misc/ad525x_dpot.c
@@ -3,7 +3,7 @@
* Copyright (c) 2009-2010 Analog Devices, Inc.
* Author: Michael Hennerich <hennerich@blackfin.uclinux.org>
*
- * DEVID #Wipers #Positions Resistor Options (kOhm)
+ * DEVID #Wipers #Positions Resistor Options (kOhm)
* AD5258 1 64 1, 10, 50, 100
* AD5259 1 256 5, 10, 50, 100
* AD5251 2 64 1, 10, 50, 100
@@ -84,12 +84,12 @@
struct dpot_data {
struct ad_dpot_bus_data bdata;
struct mutex update_lock;
- unsigned rdac_mask;
- unsigned max_pos;
+ unsigned int rdac_mask;
+ unsigned int max_pos;
unsigned long devid;
- unsigned uid;
- unsigned feat;
- unsigned wipers;
+ unsigned int uid;
+ unsigned int feat;
+ unsigned int wipers;
u16 rdac_cache[MAX_RDACS];
DECLARE_BITMAP(otp_en_mask, MAX_RDACS);
};
@@ -126,7 +126,7 @@ static inline int dpot_write_r8d16(struct dpot_data *dpot, u8 reg, u16 val)
static s32 dpot_read_spi(struct dpot_data *dpot, u8 reg)
{
- unsigned ctrl = 0;
+ unsigned int ctrl = 0;
int value;
if (!(reg & (DPOT_ADDR_EEPROM | DPOT_ADDR_CMD))) {
@@ -175,7 +175,7 @@ static s32 dpot_read_spi(struct dpot_data *dpot, u8 reg)
static s32 dpot_read_i2c(struct dpot_data *dpot, u8 reg)
{
int value;
- unsigned ctrl = 0;
+ unsigned int ctrl = 0;
switch (dpot->uid) {
case DPOT_UID(AD5246_ID):
@@ -238,7 +238,7 @@ static s32 dpot_read(struct dpot_data *dpot, u8 reg)
static s32 dpot_write_spi(struct dpot_data *dpot, u8 reg, u16 value)
{
- unsigned val = 0;
+ unsigned int val = 0;
if (!(reg & (DPOT_ADDR_EEPROM | DPOT_ADDR_CMD | DPOT_ADDR_OTP))) {
if (dpot->feat & F_RDACS_WONLY)
@@ -328,7 +328,7 @@ static s32 dpot_write_spi(struct dpot_data *dpot, u8 reg, u16 value)
static s32 dpot_write_i2c(struct dpot_data *dpot, u8 reg, u16 value)
{
/* Only write the instruction byte for certain commands */
- unsigned tmp = 0, ctrl = 0;
+ unsigned int tmp = 0, ctrl = 0;
switch (dpot->uid) {
case DPOT_UID(AD5246_ID):
@@ -515,11 +515,11 @@ set_##_name(struct device *dev, \
#define DPOT_DEVICE_SHOW_SET(name, reg) \
DPOT_DEVICE_SHOW(name, reg) \
DPOT_DEVICE_SET(name, reg) \
-static DEVICE_ATTR(name, S_IWUSR | S_IRUGO, show_##name, set_##name);
+static DEVICE_ATTR(name, S_IWUSR | S_IRUGO, show_##name, set_##name)
#define DPOT_DEVICE_SHOW_ONLY(name, reg) \
DPOT_DEVICE_SHOW(name, reg) \
-static DEVICE_ATTR(name, S_IWUSR | S_IRUGO, show_##name, NULL);
+static DEVICE_ATTR(name, S_IWUSR | S_IRUGO, show_##name, NULL)
DPOT_DEVICE_SHOW_SET(rdac0, DPOT_ADDR_RDAC | DPOT_RDAC0);
DPOT_DEVICE_SHOW_SET(eeprom0, DPOT_ADDR_EEPROM | DPOT_RDAC0);
@@ -616,7 +616,7 @@ set_##_name(struct device *dev, \
{ \
return sysfs_do_cmd(dev, attr, buf, count, _cmd); \
} \
-static DEVICE_ATTR(_name, S_IWUSR | S_IRUGO, NULL, set_##_name);
+static DEVICE_ATTR(_name, S_IWUSR | S_IRUGO, NULL, set_##_name)
DPOT_DEVICE_DO_CMD(inc_all, DPOT_INC_ALL);
DPOT_DEVICE_DO_CMD(dec_all, DPOT_DEC_ALL);
@@ -636,7 +636,7 @@ static const struct attribute_group ad525x_group_commands = {
};
static int ad_dpot_add_files(struct device *dev,
- unsigned features, unsigned rdac)
+ unsigned int features, unsigned int rdac)
{
int err = sysfs_create_file(&dev->kobj,
dpot_attrib_wipers[rdac]);
@@ -661,7 +661,7 @@ static int ad_dpot_add_files(struct device *dev,
}
static inline void ad_dpot_remove_files(struct device *dev,
- unsigned features, unsigned rdac)
+ unsigned int features, unsigned int rdac)
{
sysfs_remove_file(&dev->kobj,
dpot_attrib_wipers[rdac]);
diff --git a/drivers/misc/ad525x_dpot.h b/drivers/misc/ad525x_dpot.h
index 6bd1eba23bc0..443a51fd5680 100644
--- a/drivers/misc/ad525x_dpot.h
+++ b/drivers/misc/ad525x_dpot.h
@@ -195,12 +195,12 @@ enum dpot_devid {
struct dpot_data;
struct ad_dpot_bus_ops {
- int (*read_d8) (void *client);
- int (*read_r8d8) (void *client, u8 reg);
- int (*read_r8d16) (void *client, u8 reg);
- int (*write_d8) (void *client, u8 val);
- int (*write_r8d8) (void *client, u8 reg, u8 val);
- int (*write_r8d16) (void *client, u8 reg, u16 val);
+ int (*read_d8)(void *client);
+ int (*read_r8d8)(void *client, u8 reg);
+ int (*read_r8d16)(void *client, u8 reg);
+ int (*write_d8)(void *client, u8 val);
+ int (*write_r8d8)(void *client, u8 reg, u8 val);
+ int (*write_r8d16)(void *client, u8 reg, u16 val);
};
struct ad_dpot_bus_data {
diff --git a/drivers/misc/apds990x.c b/drivers/misc/apds990x.c
index c9f07032c2fc..ed9412d750b7 100644
--- a/drivers/misc/apds990x.c
+++ b/drivers/misc/apds990x.c
@@ -715,6 +715,7 @@ static ssize_t apds990x_rate_avail(struct device *dev,
{
int i;
int pos = 0;
+
for (i = 0; i < ARRAY_SIZE(arates_hz); i++)
pos += sprintf(buf + pos, "%d ", arates_hz[i]);
sprintf(buf + pos - 1, "\n");
@@ -725,6 +726,7 @@ static ssize_t apds990x_rate_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct apds990x_chip *chip = dev_get_drvdata(dev);
+
return sprintf(buf, "%d\n", chip->arate);
}
@@ -784,6 +786,7 @@ static ssize_t apds990x_prox_show(struct device *dev,
{
ssize_t ret;
struct apds990x_chip *chip = dev_get_drvdata(dev);
+
if (pm_runtime_suspended(dev) || !chip->prox_en)
return -EIO;
@@ -807,6 +810,7 @@ static ssize_t apds990x_prox_enable_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct apds990x_chip *chip = dev_get_drvdata(dev);
+
return sprintf(buf, "%d\n", chip->prox_en);
}
@@ -847,6 +851,7 @@ static ssize_t apds990x_prox_reporting_mode_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct apds990x_chip *chip = dev_get_drvdata(dev);
+
return sprintf(buf, "%s\n",
reporting_modes[!!chip->prox_continuous_mode]);
}
@@ -884,6 +889,7 @@ static ssize_t apds990x_lux_thresh_above_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct apds990x_chip *chip = dev_get_drvdata(dev);
+
return sprintf(buf, "%d\n", chip->lux_thres_hi);
}
@@ -891,6 +897,7 @@ static ssize_t apds990x_lux_thresh_below_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct apds990x_chip *chip = dev_get_drvdata(dev);
+
return sprintf(buf, "%d\n", chip->lux_thres_lo);
}
@@ -926,6 +933,7 @@ static ssize_t apds990x_lux_thresh_above_store(struct device *dev,
{
struct apds990x_chip *chip = dev_get_drvdata(dev);
int ret = apds990x_set_lux_thresh(chip, &chip->lux_thres_hi, buf);
+
if (ret < 0)
return ret;
return len;
@@ -937,6 +945,7 @@ static ssize_t apds990x_lux_thresh_below_store(struct device *dev,
{
struct apds990x_chip *chip = dev_get_drvdata(dev);
int ret = apds990x_set_lux_thresh(chip, &chip->lux_thres_lo, buf);
+
if (ret < 0)
return ret;
return len;
@@ -954,6 +963,7 @@ static ssize_t apds990x_prox_threshold_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct apds990x_chip *chip = dev_get_drvdata(dev);
+
return sprintf(buf, "%d\n", chip->prox_thres);
}
@@ -1026,6 +1036,7 @@ static ssize_t apds990x_chip_id_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct apds990x_chip *chip = dev_get_drvdata(dev);
+
return sprintf(buf, "%s %d\n", chip->chipname, chip->revision);
}
diff --git a/drivers/misc/ds1682.c b/drivers/misc/ds1682.c
index 7231260ac287..98a921ea9ee8 100644
--- a/drivers/misc/ds1682.c
+++ b/drivers/misc/ds1682.c
@@ -59,25 +59,42 @@ static ssize_t ds1682_show(struct device *dev, struct device_attribute *attr,
{
struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
struct i2c_client *client = to_i2c_client(dev);
- __le32 val = 0;
+ unsigned long long val, check;
+ __le32 val_le = 0;
int rc;
dev_dbg(dev, "ds1682_show() called on %s\n", attr->attr.name);
/* Read the register */
rc = i2c_smbus_read_i2c_block_data(client, sattr->index, sattr->nr,
- (u8 *) & val);
+ (u8 *)&val_le);
if (rc < 0)
return -EIO;
- /* Special case: the 32 bit regs are time values with 1/4s
- * resolution, scale them up to milliseconds */
- if (sattr->nr == 4)
- return sprintf(buf, "%llu\n",
- ((unsigned long long)le32_to_cpu(val)) * 250);
+ val = le32_to_cpu(val_le);
+
+ if (sattr->index == DS1682_REG_ELAPSED) {
+ int retries = 5;
+
+ /* Detect and retry when a tick occurs mid-read */
+ do {
+ rc = i2c_smbus_read_i2c_block_data(client, sattr->index,
+ sattr->nr,
+ (u8 *)&val_le);
+ if (rc < 0 || retries <= 0)
+ return -EIO;
+
+ check = val;
+ val = le32_to_cpu(val_le);
+ retries--;
+ } while (val != check && val != (check + 1));
+ }
- /* Format the output string and return # of bytes */
- return sprintf(buf, "%li\n", (long)le32_to_cpu(val));
+ /* Format the output string and return # of bytes
+ * Special case: the 32 bit regs are time values with 1/4s
+ * resolution, scale them up to milliseconds
+ */
+ return sprintf(buf, "%llu\n", (sattr->nr == 4) ? (val * 250) : val);
}
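[Editor's sketch — read_reg() is a hypothetical helper. The added retry loop is the standard stable-read idiom for a multi-byte counter that can tick mid-transfer: read again until two successive values agree or differ by exactly one tick.]

	#include <linux/errno.h>
	#include <linux/types.h>

	static int example_stable_read(u32 (*read_reg)(void), u32 *out)
	{
		int retries = 5;
		u32 val = read_reg(), check;

		do {
			if (retries-- <= 0)
				return -EIO;	/* counter never settled */
			check = val;
			val = read_reg();
		} while (val != check && val != check + 1);

		*out = val;
		return 0;
	}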
static ssize_t ds1682_store(struct device *dev, struct device_attribute *attr,
diff --git a/drivers/misc/eeprom/at25.c b/drivers/misc/eeprom/at25.c
index 5afe4cd16569..9282ffd607ff 100644
--- a/drivers/misc/eeprom/at25.c
+++ b/drivers/misc/eeprom/at25.c
@@ -276,6 +276,9 @@ static int at25_fw_to_chip(struct device *dev, struct spi_eeprom *chip)
return -ENODEV;
}
switch (val) {
+ case 9:
+ chip->flags |= EE_INSTR_BIT3_IS_ADDR;
+ /* fall through */
case 8:
chip->flags |= EE_ADDR1;
break;
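The new case 9 deliberately falls through into case 8: a part with nine
address bits still uses a single address byte (EE_ADDR1) and carries the
ninth address bit in bit 3 of the instruction (EE_INSTR_BIT3_IS_ADDR), so
for val == 9 the net effect is:

	chip->flags |= EE_INSTR_BIT3_IS_ADDR | EE_ADDR1;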
diff --git a/drivers/misc/enclosure.c b/drivers/misc/enclosure.c
index eb29113e0bac..5a17bfeb80d3 100644
--- a/drivers/misc/enclosure.c
+++ b/drivers/misc/enclosure.c
@@ -468,7 +468,7 @@ static struct class enclosure_class = {
.dev_groups = enclosure_class_groups,
};
-static const char *const enclosure_status [] = {
+static const char *const enclosure_status[] = {
[ENCLOSURE_STATUS_UNSUPPORTED] = "unsupported",
[ENCLOSURE_STATUS_OK] = "OK",
[ENCLOSURE_STATUS_CRITICAL] = "critical",
@@ -480,7 +480,7 @@ static const char *const enclosure_status [] = {
[ENCLOSURE_STATUS_MAX] = NULL,
};
-static const char *const enclosure_type [] = {
+static const char *const enclosure_type[] = {
[ENCLOSURE_COMPONENT_DEVICE] = "device",
[ENCLOSURE_COMPONENT_ARRAY_DEVICE] = "array device",
};
@@ -680,13 +680,7 @@ ATTRIBUTE_GROUPS(enclosure_component);
static int __init enclosure_init(void)
{
- int err;
-
- err = class_register(&enclosure_class);
- if (err)
- return err;
-
- return 0;
+ return class_register(&enclosure_class);
}
static void __exit enclosure_exit(void)
diff --git a/drivers/misc/fsa9480.c b/drivers/misc/fsa9480.c
index 71d2793b372c..607b489a6501 100644
--- a/drivers/misc/fsa9480.c
+++ b/drivers/misc/fsa9480.c
@@ -465,6 +465,7 @@ fail1:
static int fsa9480_remove(struct i2c_client *client)
{
struct fsa9480_usbsw *usbsw = i2c_get_clientdata(client);
+
if (client->irq)
free_irq(client->irq, usbsw);
diff --git a/drivers/misc/genwqe/card_base.c b/drivers/misc/genwqe/card_base.c
index 4fd21e86ad56..c7cd3675bcd1 100644
--- a/drivers/misc/genwqe/card_base.c
+++ b/drivers/misc/genwqe/card_base.c
@@ -153,11 +153,11 @@ static struct genwqe_dev *genwqe_dev_alloc(void)
cd->card_state = GENWQE_CARD_UNUSED;
spin_lock_init(&cd->print_lock);
- cd->ddcb_software_timeout = genwqe_ddcb_software_timeout;
- cd->kill_timeout = genwqe_kill_timeout;
+ cd->ddcb_software_timeout = GENWQE_DDCB_SOFTWARE_TIMEOUT;
+ cd->kill_timeout = GENWQE_KILL_TIMEOUT;
for (j = 0; j < GENWQE_MAX_VFS; j++)
- cd->vf_jobtimeout_msec[j] = genwqe_vf_jobtimeout_msec;
+ cd->vf_jobtimeout_msec[j] = GENWQE_VF_JOBTIMEOUT_MSEC;
genwqe_devices[i] = cd;
return cd;
@@ -324,11 +324,11 @@ static bool genwqe_setup_pf_jtimer(struct genwqe_dev *cd)
u32 T = genwqe_T_psec(cd);
u64 x;
- if (genwqe_pf_jobtimeout_msec == 0)
+ if (GENWQE_PF_JOBTIMEOUT_MSEC == 0)
return false;
/* PF: large value needed, flash update 2sec per block */
- x = ilog2(genwqe_pf_jobtimeout_msec *
+ x = ilog2(GENWQE_PF_JOBTIMEOUT_MSEC *
16000000000uL/(T * 15)) - 10;
genwqe_write_vreg(cd, IO_SLC_VF_APPJOB_TIMEOUT,
@@ -904,7 +904,7 @@ static int genwqe_reload_bistream(struct genwqe_dev *cd)
* b) a critical GFIR occurred
*
* Informational GFIRs are checked and potentially printed every
- * health_check_interval seconds.
+ * GENWQE_HEALTH_CHECK_INTERVAL seconds.
*/
static int genwqe_health_thread(void *data)
{
@@ -918,7 +918,7 @@ static int genwqe_health_thread(void *data)
rc = wait_event_interruptible_timeout(cd->health_waitq,
(genwqe_health_check_cond(cd, &gfir) ||
(should_stop = kthread_should_stop())),
- genwqe_health_check_interval * HZ);
+ GENWQE_HEALTH_CHECK_INTERVAL * HZ);
if (should_stop)
break;
@@ -1028,7 +1028,7 @@ static int genwqe_health_check_start(struct genwqe_dev *cd)
{
int rc;
- if (genwqe_health_check_interval <= 0)
+ if (GENWQE_HEALTH_CHECK_INTERVAL <= 0)
return 0; /* valid for disabling the service */
/* moved before request_irq() */
diff --git a/drivers/misc/genwqe/card_base.h b/drivers/misc/genwqe/card_base.h
index 3743c87f8ab9..1c3967f10f55 100644
--- a/drivers/misc/genwqe/card_base.h
+++ b/drivers/misc/genwqe/card_base.h
@@ -47,13 +47,13 @@
#define GENWQE_CARD_NO_MAX (16 * GENWQE_MAX_FUNCS)
/* Compile parameters, some of them appear in debugfs for later adjustment */
-#define genwqe_ddcb_max 32 /* DDCBs on the work-queue */
-#define genwqe_polling_enabled 0 /* in case of irqs not working */
-#define genwqe_ddcb_software_timeout 10 /* timeout per DDCB in seconds */
-#define genwqe_kill_timeout 8 /* time until process gets killed */
-#define genwqe_vf_jobtimeout_msec 250 /* 250 msec */
-#define genwqe_pf_jobtimeout_msec 8000 /* 8 sec should be ok */
-#define genwqe_health_check_interval 4 /* <= 0: disabled */
+#define GENWQE_DDCB_MAX 32 /* DDCBs on the work-queue */
+#define GENWQE_POLLING_ENABLED 0 /* in case of irqs not working */
+#define GENWQE_DDCB_SOFTWARE_TIMEOUT 10 /* timeout per DDCB in seconds */
+#define GENWQE_KILL_TIMEOUT 8 /* time until process gets killed */
+#define GENWQE_VF_JOBTIMEOUT_MSEC 250 /* 250 msec */
+#define GENWQE_PF_JOBTIMEOUT_MSEC 8000 /* 8 sec should be ok */
+#define GENWQE_HEALTH_CHECK_INTERVAL 4 /* <= 0: disabled */
/* Sysfs attribute groups used when we create the genwqe device */
extern const struct attribute_group *genwqe_attribute_groups[];
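With the timeouts turned into genuine compile-time constants (and spelled in
capitals per the usual macro convention), checks against them are folded
away by the compiler; for the current values a branch like the following is
simply dead code:

	if (GENWQE_HEALTH_CHECK_INTERVAL <= 0)
		return 0;	/* never taken while the value is 4 */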
@@ -490,11 +490,9 @@ int genwqe_read_app_id(struct genwqe_dev *cd, char *app_name, int len);
/* Memory allocation/deallocation; dma address handling */
int genwqe_user_vmap(struct genwqe_dev *cd, struct dma_mapping *m,
- void *uaddr, unsigned long size,
- struct ddcb_requ *req);
+ void *uaddr, unsigned long size);
-int genwqe_user_vunmap(struct genwqe_dev *cd, struct dma_mapping *m,
- struct ddcb_requ *req);
+int genwqe_user_vunmap(struct genwqe_dev *cd, struct dma_mapping *m);
static inline bool dma_mapping_used(struct dma_mapping *m)
{
diff --git a/drivers/misc/genwqe/card_ddcb.c b/drivers/misc/genwqe/card_ddcb.c
index ddfeefe39540..b7f8d35c17a9 100644
--- a/drivers/misc/genwqe/card_ddcb.c
+++ b/drivers/misc/genwqe/card_ddcb.c
@@ -500,7 +500,7 @@ int __genwqe_wait_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req)
rc = wait_event_interruptible_timeout(queue->ddcb_waitqs[ddcb_no],
ddcb_requ_finished(cd, req),
- genwqe_ddcb_software_timeout * HZ);
+ GENWQE_DDCB_SOFTWARE_TIMEOUT * HZ);
/*
* We need to distinguish 3 cases here:
@@ -633,7 +633,7 @@ int __genwqe_purge_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req)
__be32 old, new;
/* unsigned long flags; */
- if (genwqe_ddcb_software_timeout <= 0) {
+ if (GENWQE_DDCB_SOFTWARE_TIMEOUT <= 0) {
dev_err(&pci_dev->dev,
"[%s] err: software timeout is not set!\n", __func__);
return -EFAULT;
@@ -641,7 +641,7 @@ int __genwqe_purge_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req)
pddcb = &queue->ddcb_vaddr[req->num];
- for (t = 0; t < genwqe_ddcb_software_timeout * 10; t++) {
+ for (t = 0; t < GENWQE_DDCB_SOFTWARE_TIMEOUT * 10; t++) {
spin_lock_irqsave(&queue->ddcb_lock, flags);
@@ -718,7 +718,7 @@ go_home:
dev_err(&pci_dev->dev,
"[%s] err: DDCB#%d not purged and not completed after %d seconds QSTAT=%016llx!!\n",
- __func__, req->num, genwqe_ddcb_software_timeout,
+ __func__, req->num, GENWQE_DDCB_SOFTWARE_TIMEOUT,
queue_status);
print_ddcb_info(cd, req->queue);
@@ -778,7 +778,7 @@ int __genwqe_enqueue_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req,
/* FIXME circumvention to improve performance when no irq is
* there.
*/
- if (genwqe_polling_enabled)
+ if (GENWQE_POLLING_ENABLED)
genwqe_check_ddcb_queue(cd, queue);
/*
@@ -878,7 +878,7 @@ int __genwqe_enqueue_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req,
pddcb->icrc_hsi_shi_32 = cpu_to_be32((u32)icrc << 16);
/* enable DDCB completion irq */
- if (!genwqe_polling_enabled)
+ if (!GENWQE_POLLING_ENABLED)
pddcb->icrc_hsi_shi_32 |= DDCB_INTR_BE32;
dev_dbg(&pci_dev->dev, "INPUT DDCB#%d\n", req->num);
@@ -1028,10 +1028,10 @@ static int setup_ddcb_queue(struct genwqe_dev *cd, struct ddcb_queue *queue)
unsigned int queue_size;
struct pci_dev *pci_dev = cd->pci_dev;
- if (genwqe_ddcb_max < 2)
+ if (GENWQE_DDCB_MAX < 2)
return -EINVAL;
- queue_size = roundup(genwqe_ddcb_max * sizeof(struct ddcb), PAGE_SIZE);
+ queue_size = roundup(GENWQE_DDCB_MAX * sizeof(struct ddcb), PAGE_SIZE);
queue->ddcbs_in_flight = 0; /* statistics */
queue->ddcbs_max_in_flight = 0;
@@ -1040,7 +1040,7 @@ static int setup_ddcb_queue(struct genwqe_dev *cd, struct ddcb_queue *queue)
queue->wait_on_busy = 0;
queue->ddcb_seq = 0x100; /* start sequence number */
- queue->ddcb_max = genwqe_ddcb_max; /* module parameter */
+ queue->ddcb_max = GENWQE_DDCB_MAX;
queue->ddcb_vaddr = __genwqe_alloc_consistent(cd, queue_size,
&queue->ddcb_daddr);
if (queue->ddcb_vaddr == NULL) {
@@ -1194,7 +1194,7 @@ static int genwqe_card_thread(void *data)
genwqe_check_ddcb_queue(cd, &cd->queue);
- if (genwqe_polling_enabled) {
+ if (GENWQE_POLLING_ENABLED) {
rc = wait_event_interruptible_timeout(
cd->queue_waitq,
genwqe_ddcbs_in_flight(cd) ||
@@ -1340,7 +1340,7 @@ static int queue_wake_up_all(struct genwqe_dev *cd)
int genwqe_finish_queue(struct genwqe_dev *cd)
{
int i, rc = 0, in_flight;
- int waitmax = genwqe_ddcb_software_timeout;
+ int waitmax = GENWQE_DDCB_SOFTWARE_TIMEOUT;
struct pci_dev *pci_dev = cd->pci_dev;
struct ddcb_queue *queue = &cd->queue;
diff --git a/drivers/misc/genwqe/card_debugfs.c b/drivers/misc/genwqe/card_debugfs.c
index c715534e7fe7..f921dd590271 100644
--- a/drivers/misc/genwqe/card_debugfs.c
+++ b/drivers/misc/genwqe/card_debugfs.c
@@ -198,7 +198,7 @@ static int genwqe_jtimer_show(struct seq_file *s, void *unused)
jtimer = genwqe_read_vreg(cd, IO_SLC_VF_APPJOB_TIMEOUT, 0);
seq_printf(s, " PF 0x%016llx %d msec\n", jtimer,
- genwqe_pf_jobtimeout_msec);
+ GENWQE_PF_JOBTIMEOUT_MSEC);
for (vf_num = 0; vf_num < cd->num_vfs; vf_num++) {
jtimer = genwqe_read_vreg(cd, IO_SLC_VF_APPJOB_TIMEOUT,
diff --git a/drivers/misc/genwqe/card_dev.c b/drivers/misc/genwqe/card_dev.c
index 3ecfa35457e0..0dd6b5ef314a 100644
--- a/drivers/misc/genwqe/card_dev.c
+++ b/drivers/misc/genwqe/card_dev.c
@@ -226,7 +226,7 @@ static void genwqe_remove_mappings(struct genwqe_file *cfile)
kfree(dma_map);
} else if (dma_map->type == GENWQE_MAPPING_SGL_TEMP) {
/* we use dma_map statically from the request */
- genwqe_user_vunmap(cd, dma_map, NULL);
+ genwqe_user_vunmap(cd, dma_map);
}
}
}
@@ -249,7 +249,7 @@ static void genwqe_remove_pinnings(struct genwqe_file *cfile)
* deleted.
*/
list_del_init(&dma_map->pin_list);
- genwqe_user_vunmap(cd, dma_map, NULL);
+ genwqe_user_vunmap(cd, dma_map);
kfree(dma_map);
}
}
@@ -790,7 +790,7 @@ static int genwqe_pin_mem(struct genwqe_file *cfile, struct genwqe_mem *m)
return -ENOMEM;
genwqe_mapping_init(dma_map, GENWQE_MAPPING_SGL_PINNED);
- rc = genwqe_user_vmap(cd, dma_map, (void *)map_addr, map_size, NULL);
+ rc = genwqe_user_vmap(cd, dma_map, (void *)map_addr, map_size);
if (rc != 0) {
dev_err(&pci_dev->dev,
"[%s] genwqe_user_vmap rc=%d\n", __func__, rc);
@@ -820,7 +820,7 @@ static int genwqe_unpin_mem(struct genwqe_file *cfile, struct genwqe_mem *m)
return -ENOENT;
genwqe_del_pin(cfile, dma_map);
- genwqe_user_vunmap(cd, dma_map, NULL);
+ genwqe_user_vunmap(cd, dma_map);
kfree(dma_map);
return 0;
}
@@ -841,7 +841,7 @@ static int ddcb_cmd_cleanup(struct genwqe_file *cfile, struct ddcb_requ *req)
if (dma_mapping_used(dma_map)) {
__genwqe_del_mapping(cfile, dma_map);
- genwqe_user_vunmap(cd, dma_map, req);
+ genwqe_user_vunmap(cd, dma_map);
}
if (req->sgls[i].sgl != NULL)
genwqe_free_sync_sgl(cd, &req->sgls[i]);
@@ -947,7 +947,7 @@ static int ddcb_cmd_fixups(struct genwqe_file *cfile, struct ddcb_requ *req)
m->write = 0;
rc = genwqe_user_vmap(cd, m, (void *)u_addr,
- u_size, req);
+ u_size);
if (rc != 0)
goto err_out;
@@ -1011,7 +1011,6 @@ static int do_execute_ddcb(struct genwqe_file *cfile,
{
int rc;
struct genwqe_ddcb_cmd *cmd;
- struct ddcb_requ *req;
struct genwqe_dev *cd = cfile->cd;
struct file *filp = cfile->filp;
@@ -1019,8 +1018,6 @@ static int do_execute_ddcb(struct genwqe_file *cfile,
if (cmd == NULL)
return -ENOMEM;
- req = container_of(cmd, struct ddcb_requ, cmd);
-
if (copy_from_user(cmd, (void __user *)arg, sizeof(*cmd))) {
ddcb_requ_free(cmd);
return -EFAULT;
@@ -1345,7 +1342,7 @@ static int genwqe_inform_and_stop_processes(struct genwqe_dev *cd)
rc = genwqe_kill_fasync(cd, SIGIO);
if (rc > 0) {
/* give kill_timeout seconds to close file descriptors ... */
- for (i = 0; (i < genwqe_kill_timeout) &&
+ for (i = 0; (i < GENWQE_KILL_TIMEOUT) &&
genwqe_open_files(cd); i++) {
dev_info(&pci_dev->dev, " %d sec ...", i);
@@ -1363,7 +1360,7 @@ static int genwqe_inform_and_stop_processes(struct genwqe_dev *cd)
rc = genwqe_force_sig(cd, SIGKILL); /* force terminate */
if (rc) {
/* Give kill_timeout more seconds to end processes */
- for (i = 0; (i < genwqe_kill_timeout) &&
+ for (i = 0; (i < GENWQE_KILL_TIMEOUT) &&
genwqe_open_files(cd); i++) {
dev_warn(&pci_dev->dev, " %d sec ...", i);
diff --git a/drivers/misc/genwqe/card_utils.c b/drivers/misc/genwqe/card_utils.c
index 5c0d917636f7..8f2e6442d88b 100644
--- a/drivers/misc/genwqe/card_utils.c
+++ b/drivers/misc/genwqe/card_utils.c
@@ -524,22 +524,16 @@ int genwqe_free_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl)
}
/**
- * free_user_pages() - Give pinned pages back
+ * genwqe_free_user_pages() - Give pinned pages back
*
- * Documentation of get_user_pages is in mm/memory.c:
+ * Documentation of get_user_pages is in mm/gup.c:
*
* If the page is written to, set_page_dirty (or set_page_dirty_lock,
* as appropriate) must be called after the page is finished with, and
* before put_page is called.
- *
- * FIXME Could be of use to others and might belong in the generic
- * code, if others agree. E.g.
- * ll_free_user_pages in drivers/staging/lustre/lustre/llite/rw26.c
- * ceph_put_page_vector in net/ceph/pagevec.c
- * maybe more?
*/
-static int free_user_pages(struct page **page_list, unsigned int nr_pages,
- int dirty)
+static int genwqe_free_user_pages(struct page **page_list,
+ unsigned int nr_pages, int dirty)
{
unsigned int i;
@@ -577,7 +571,7 @@ static int free_user_pages(struct page **page_list, unsigned int nr_pages,
* Return: 0 if success
*/
int genwqe_user_vmap(struct genwqe_dev *cd, struct dma_mapping *m, void *uaddr,
- unsigned long size, struct ddcb_requ *req)
+ unsigned long size)
{
int rc = -EINVAL;
unsigned long data, offs;
@@ -617,7 +611,7 @@ int genwqe_user_vmap(struct genwqe_dev *cd, struct dma_mapping *m, void *uaddr,
/* assumption: get_user_pages can be killed by signals. */
if (rc < m->nr_pages) {
- free_user_pages(m->page_list, rc, m->write);
+ genwqe_free_user_pages(m->page_list, rc, m->write);
rc = -EFAULT;
goto fail_get_user_pages;
}
@@ -629,7 +623,7 @@ int genwqe_user_vmap(struct genwqe_dev *cd, struct dma_mapping *m, void *uaddr,
return 0;
fail_free_user_pages:
- free_user_pages(m->page_list, m->nr_pages, m->write);
+ genwqe_free_user_pages(m->page_list, m->nr_pages, m->write);
fail_get_user_pages:
kfree(m->page_list);
@@ -647,8 +641,7 @@ int genwqe_user_vmap(struct genwqe_dev *cd, struct dma_mapping *m, void *uaddr,
* @cd: pointer to genwqe device
* @m: mapping params
*/
-int genwqe_user_vunmap(struct genwqe_dev *cd, struct dma_mapping *m,
- struct ddcb_requ *req)
+int genwqe_user_vunmap(struct genwqe_dev *cd, struct dma_mapping *m)
{
struct pci_dev *pci_dev = cd->pci_dev;
@@ -662,7 +655,7 @@ int genwqe_user_vunmap(struct genwqe_dev *cd, struct dma_mapping *m,
genwqe_unmap_pages(cd, m->dma_list, m->nr_pages);
if (m->page_list) {
- free_user_pages(m->page_list, m->nr_pages, m->write);
+ genwqe_free_user_pages(m->page_list, m->nr_pages, m->write);
kfree(m->page_list);
m->page_list = NULL;
diff --git a/drivers/misc/hpilo.c b/drivers/misc/hpilo.c
index 95ce3e891b1b..35693c0a78e2 100644
--- a/drivers/misc/hpilo.c
+++ b/drivers/misc/hpilo.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Driver for the HP iLO management processor.
*
* Copyright (C) 2008 Hewlett-Packard Development Company, L.P.
* David Altobelli <david.altobelli@hpe.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/types.h>
diff --git a/drivers/misc/hpilo.h b/drivers/misc/hpilo.h
index b97672e0cf90..94dfb9e40e29 100644
--- a/drivers/misc/hpilo.h
+++ b/drivers/misc/hpilo.h
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* linux/drivers/char/hpilo.h
*
* Copyright (C) 2008 Hewlett-Packard Development Company, L.P.
* David Altobelli <david.altobelli@hp.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __HPILO_H
#define __HPILO_H
diff --git a/drivers/misc/ics932s401.c b/drivers/misc/ics932s401.c
index 28f51e01fd2b..81a0541ef3ac 100644
--- a/drivers/misc/ics932s401.c
+++ b/drivers/misc/ics932s401.c
@@ -33,7 +33,7 @@ static const unsigned short normal_i2c[] = { 0x69, I2C_CLIENT_END };
/* ICS932S401 registers */
#define ICS932S401_REG_CFG2 0x01
-#define ICS932S401_CFG1_SPREAD 0x01
+#define ICS932S401_CFG1_SPREAD 0x01
#define ICS932S401_REG_CFG7 0x06
#define ICS932S401_FS_MASK 0x07
#define ICS932S401_REG_VENDOR_REV 0x07
@@ -58,7 +58,7 @@ static const unsigned short normal_i2c[] = { 0x69, I2C_CLIENT_END };
#define ICS932S401_REG_SRC_SPREAD1 0x11
#define ICS932S401_REG_SRC_SPREAD2 0x12
#define ICS932S401_REG_CPU_DIVISOR 0x13
-#define ICS932S401_CPU_DIVISOR_SHIFT 4
+#define ICS932S401_CPU_DIVISOR_SHIFT 4
#define ICS932S401_REG_PCISRC_DIVISOR 0x14
#define ICS932S401_SRC_DIVISOR_MASK 0x0F
#define ICS932S401_PCI_DIVISOR_SHIFT 4
@@ -225,6 +225,7 @@ static ssize_t show_cpu_clock_sel(struct device *dev,
else {
/* Freq is neatly wrapped up for us */
int fid = data->regs[ICS932S401_REG_CFG7] & ICS932S401_FS_MASK;
+
freq = fs_speeds[fid];
if (data->regs[ICS932S401_REG_CTRL] & ICS932S401_CPU_ALT) {
switch (freq) {
@@ -352,8 +353,7 @@ static DEVICE_ATTR(ref_clock, S_IRUGO, show_value, NULL);
static DEVICE_ATTR(cpu_spread, S_IRUGO, show_spread, NULL);
static DEVICE_ATTR(src_spread, S_IRUGO, show_spread, NULL);
-static struct attribute *ics932s401_attr[] =
-{
+static struct attribute *ics932s401_attr[] = {
&dev_attr_spread_enabled.attr,
&dev_attr_cpu_clock_selection.attr,
&dev_attr_cpu_clock.attr,
diff --git a/drivers/misc/isl29003.c b/drivers/misc/isl29003.c
index 976df0013633..b8032882c865 100644
--- a/drivers/misc/isl29003.c
+++ b/drivers/misc/isl29003.c
@@ -78,6 +78,7 @@ static int __isl29003_read_reg(struct i2c_client *client,
u32 reg, u8 mask, u8 shift)
{
struct isl29003_data *data = i2c_get_clientdata(client);
+
return (data->reg_cache[reg] & mask) >> shift;
}
@@ -160,6 +161,7 @@ static int isl29003_get_power_state(struct i2c_client *client)
{
struct isl29003_data *data = i2c_get_clientdata(client);
u8 cmdreg = data->reg_cache[ISL29003_REG_COMMAND];
+
return ~cmdreg & ISL29003_ADC_PD;
}
@@ -196,6 +198,7 @@ static ssize_t isl29003_show_range(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct i2c_client *client = to_i2c_client(dev);
+
return sprintf(buf, "%i\n", isl29003_get_range(client));
}
@@ -231,6 +234,7 @@ static ssize_t isl29003_show_resolution(struct device *dev,
char *buf)
{
struct i2c_client *client = to_i2c_client(dev);
+
return sprintf(buf, "%d\n", isl29003_get_resolution(client));
}
@@ -264,6 +268,7 @@ static ssize_t isl29003_show_mode(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct i2c_client *client = to_i2c_client(dev);
+
return sprintf(buf, "%d\n", isl29003_get_mode(client));
}
@@ -298,6 +303,7 @@ static ssize_t isl29003_show_power_state(struct device *dev,
char *buf)
{
struct i2c_client *client = to_i2c_client(dev);
+
return sprintf(buf, "%d\n", isl29003_get_power_state(client));
}
@@ -361,6 +367,7 @@ static int isl29003_init_client(struct i2c_client *client)
* if one of the reads fails, we consider the init failed */
for (i = 0; i < ARRAY_SIZE(data->reg_cache); i++) {
int v = i2c_smbus_read_byte_data(client, i);
+
if (v < 0)
return -ENODEV;
diff --git a/drivers/misc/lkdtm_core.c b/drivers/misc/lkdtm_core.c
index ba92291508dc..4942da93d066 100644
--- a/drivers/misc/lkdtm_core.c
+++ b/drivers/misc/lkdtm_core.c
@@ -96,7 +96,7 @@ static struct crashpoint crashpoints[] = {
CRASHPOINT("DIRECT", NULL),
#ifdef CONFIG_KPROBES
CRASHPOINT("INT_HARDWARE_ENTRY", "do_IRQ"),
- CRASHPOINT("INT_HW_IRQ_EN", "handle_IRQ_event"),
+ CRASHPOINT("INT_HW_IRQ_EN", "handle_irq_event"),
CRASHPOINT("INT_TASKLET_ENTRY", "tasklet_action"),
CRASHPOINT("FS_DEVRW", "ll_rw_block"),
CRASHPOINT("MEM_SWAPOUT", "shrink_inactive_list"),
diff --git a/drivers/misc/lkdtm_heap.c b/drivers/misc/lkdtm_heap.c
index f5494a6d4be5..65026d7de130 100644
--- a/drivers/misc/lkdtm_heap.c
+++ b/drivers/misc/lkdtm_heap.c
@@ -16,6 +16,8 @@ void lkdtm_OVERWRITE_ALLOCATION(void)
{
size_t len = 1020;
u32 *data = kmalloc(len, GFP_KERNEL);
+ if (!data)
+ return;
data[1024 / sizeof(u32)] = 0x12345678;
kfree(data);
@@ -33,6 +35,8 @@ void lkdtm_WRITE_AFTER_FREE(void)
size_t offset = (len / sizeof(*base)) / 2;
base = kmalloc(len, GFP_KERNEL);
+ if (!base)
+ return;
pr_info("Allocated memory %p-%p\n", base, &base[offset * 2]);
pr_info("Attempting bad write to freed memory at %p\n",
&base[offset]);
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
index 1ac10cb64d6e..3e5eabdae8d9 100644
--- a/drivers/misc/mei/bus.c
+++ b/drivers/misc/mei/bus.c
@@ -543,14 +543,20 @@ int mei_cldev_disable(struct mei_cl_device *cldev)
mutex_lock(&bus->device_lock);
if (!mei_cl_is_connected(cl)) {
- dev_dbg(bus->dev, "Already disconnected");
+ dev_dbg(bus->dev, "Already disconnected\n");
+ err = 0;
+ goto out;
+ }
+
+ if (bus->dev_state == MEI_DEV_POWER_DOWN) {
+ dev_dbg(bus->dev, "Device is powering down, don't bother with disconnection\n");
err = 0;
goto out;
}
err = mei_cl_disconnect(cl);
if (err < 0)
- dev_err(bus->dev, "Could not disconnect from the ME client");
+ dev_err(bus->dev, "Could not disconnect from the ME client\n");
out:
/* Flush queues and remove any pending read */
diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c
index 10dcf4ff99a5..334ab02e1de2 100644
--- a/drivers/misc/mei/hw-me.c
+++ b/drivers/misc/mei/hw-me.c
@@ -1260,7 +1260,9 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
if (rets == -ENODATA)
break;
- if (rets && dev->dev_state != MEI_DEV_RESETTING) {
+ if (rets &&
+ (dev->dev_state != MEI_DEV_RESETTING &&
+ dev->dev_state != MEI_DEV_POWER_DOWN)) {
dev_err(dev->dev, "mei_irq_read_handler ret = %d.\n",
rets);
schedule_work(&dev->reset_work);
diff --git a/drivers/misc/mei/hw-txe.c b/drivers/misc/mei/hw-txe.c
index 24e4a4c96606..c2c8993e2a51 100644
--- a/drivers/misc/mei/hw-txe.c
+++ b/drivers/misc/mei/hw-txe.c
@@ -1127,7 +1127,9 @@ irqreturn_t mei_txe_irq_thread_handler(int irq, void *dev_id)
if (test_and_clear_bit(TXE_INTR_OUT_DB_BIT, &hw->intr_cause)) {
/* Read from TXE */
rets = mei_irq_read_handler(dev, &cmpl_list, &slots);
- if (rets && dev->dev_state != MEI_DEV_RESETTING) {
+ if (rets &&
+ (dev->dev_state != MEI_DEV_RESETTING &&
+ dev->dev_state != MEI_DEV_POWER_DOWN)) {
dev_err(dev->dev,
"mei_irq_read_handler ret = %d.\n", rets);
diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c
index d2f691424dd1..c46f6e99a55e 100644
--- a/drivers/misc/mei/init.c
+++ b/drivers/misc/mei/init.c
@@ -310,6 +310,9 @@ void mei_stop(struct mei_device *dev)
{
dev_dbg(dev->dev, "stopping the device.\n");
+ mutex_lock(&dev->device_lock);
+ dev->dev_state = MEI_DEV_POWER_DOWN;
+ mutex_unlock(&dev->device_lock);
mei_cl_bus_remove_devices(dev);
mei_cancel_work(dev);
@@ -319,7 +322,6 @@ void mei_stop(struct mei_device *dev)
mutex_lock(&dev->device_lock);
- dev->dev_state = MEI_DEV_POWER_DOWN;
mei_reset(dev);
/* move device to disabled state unconditionally */
dev->dev_state = MEI_DEV_DISABLED;
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index f4f17552c9b8..4a0ccda4d04b 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -238,8 +238,11 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
*/
mei_me_set_pm_domain(dev);
- if (mei_pg_is_enabled(dev))
+ if (mei_pg_is_enabled(dev)) {
pm_runtime_put_noidle(&pdev->dev);
+ if (hw->d0i3_supported)
+ pm_runtime_allow(&pdev->dev);
+ }
dev_dbg(&pdev->dev, "initialization successful.\n");
diff --git a/drivers/misc/mic/vop/vop_vringh.c b/drivers/misc/mic/vop/vop_vringh.c
index 4120ed8f0cae..01d1f2ba7bb8 100644
--- a/drivers/misc/mic/vop/vop_vringh.c
+++ b/drivers/misc/mic/vop/vop_vringh.c
@@ -937,13 +937,10 @@ static long vop_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
dd.num_vq > MIC_MAX_VRINGS)
return -EINVAL;
- dd_config = kzalloc(mic_desc_size(&dd), GFP_KERNEL);
- if (!dd_config)
- return -ENOMEM;
- if (copy_from_user(dd_config, argp, mic_desc_size(&dd))) {
- ret = -EFAULT;
- goto free_ret;
- }
+ dd_config = memdup_user(argp, mic_desc_size(&dd));
+ if (IS_ERR(dd_config))
+ return PTR_ERR(dd_config);
+
/* Ensure desc has not changed between the two reads */
if (memcmp(&dd, dd_config, sizeof(dd))) {
ret = -EINVAL;
@@ -995,17 +992,12 @@ _unlock_ret:
ret = vop_vdev_inited(vdev);
if (ret)
goto __unlock_ret;
- buf = kzalloc(vdev->dd->config_len, GFP_KERNEL);
- if (!buf) {
- ret = -ENOMEM;
+ buf = memdup_user(argp, vdev->dd->config_len);
+ if (IS_ERR(buf)) {
+ ret = PTR_ERR(buf);
goto __unlock_ret;
}
- if (copy_from_user(buf, argp, vdev->dd->config_len)) {
- ret = -EFAULT;
- goto done;
- }
ret = vop_virtio_config_change(vdev, buf);
-done:
kfree(buf);
__unlock_ret:
mutex_unlock(&vdev->vdev_mutex);
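memdup_user() bundles the allocate-and-copy pair into a single call and
returns an ERR_PTR on failure, which is what lets the separate "done" label
disappear. The general shape of the pattern:

	void *buf = memdup_user(argp, len);

	if (IS_ERR(buf))
		return PTR_ERR(buf);	/* -ENOMEM or -EFAULT */

	/* ... use buf ... */
	kfree(buf);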
diff --git a/drivers/misc/vexpress-syscfg.c b/drivers/misc/vexpress-syscfg.c
index 2cde80c7bb93..9eea30f54fd6 100644
--- a/drivers/misc/vexpress-syscfg.c
+++ b/drivers/misc/vexpress-syscfg.c
@@ -270,10 +270,8 @@ static int vexpress_syscfg_probe(struct platform_device *pdev)
/* Must use dev.parent (MFD), as that's where DT phandle points at... */
bridge = vexpress_config_bridge_register(pdev->dev.parent,
&vexpress_syscfg_bridge_ops, syscfg);
- if (IS_ERR(bridge))
- return PTR_ERR(bridge);
- return 0;
+ return PTR_ERR_OR_ZERO(bridge);
}
static const struct platform_device_id vexpress_syscfg_id_table[] = {
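PTR_ERR_OR_ZERO() exists for exactly this tail pattern; it behaves roughly
like:

	static inline int PTR_ERR_OR_ZERO(const void *ptr)
	{
		if (IS_ERR(ptr))
			return PTR_ERR(ptr);
		return 0;
	}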
diff --git a/drivers/mux/Kconfig b/drivers/mux/Kconfig
index 19e4e904c9bf..6241678e99af 100644
--- a/drivers/mux/Kconfig
+++ b/drivers/mux/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Multiplexer devices
#
diff --git a/drivers/mux/Makefile b/drivers/mux/Makefile
index 0e1e59760e3f..c3d883955fd5 100644
--- a/drivers/mux/Makefile
+++ b/drivers/mux/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for multiplexer devices.
#
diff --git a/drivers/mux/adg792a.c b/drivers/mux/adg792a.c
index 12aa221ab90d..6a8725cf3d71 100644
--- a/drivers/mux/adg792a.c
+++ b/drivers/mux/adg792a.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Multiplexer driver for Analog Devices ADG792A/G Triple 4:1 mux
*
* Copyright (C) 2017 Axentia Technologies AB
*
* Author: Peter Rosin <peda@axentia.se>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/err.h>
diff --git a/drivers/mux/core.c b/drivers/mux/core.c
index 6e5cf9d9cd99..d1271c1ee23c 100644
--- a/drivers/mux/core.c
+++ b/drivers/mux/core.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Multiplexer subsystem
*
* Copyright (C) 2017 Axentia Technologies AB
*
* Author: Peter Rosin <peda@axentia.se>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#define pr_fmt(fmt) "mux-core: " fmt
diff --git a/drivers/mux/gpio.c b/drivers/mux/gpio.c
index 468bf1709606..6fdd9316db8b 100644
--- a/drivers/mux/gpio.c
+++ b/drivers/mux/gpio.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPIO-controlled multiplexer driver
*
* Copyright (C) 2017 Axentia Technologies AB
*
* Author: Peter Rosin <peda@axentia.se>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/err.h>
diff --git a/drivers/mux/mmio.c b/drivers/mux/mmio.c
index 37c1de359a70..935ac44aa209 100644
--- a/drivers/mux/mmio.c
+++ b/drivers/mux/mmio.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* MMIO register bitfield-controlled multiplexer driver
*
* Copyright (C) 2017 Pengutronix, Philipp Zabel <kernel@pengutronix.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/bitops.h>
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index 5a5cefd12153..35a3dbeea324 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -444,7 +444,6 @@ static int nvmem_setup_compat(struct nvmem_device *nvmem,
struct nvmem_device *nvmem_register(const struct nvmem_config *config)
{
struct nvmem_device *nvmem;
- struct device_node *np;
int rval;
if (!config->dev)
@@ -464,8 +463,8 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
nvmem->owner = config->owner;
if (!nvmem->owner && config->dev->driver)
nvmem->owner = config->dev->driver->owner;
- nvmem->stride = config->stride;
- nvmem->word_size = config->word_size;
+ nvmem->stride = config->stride ?: 1;
+ nvmem->word_size = config->word_size ?: 1;
nvmem->size = config->size;
nvmem->dev.type = &nvmem_provider_type;
nvmem->dev.bus = &nvmem_bus_type;
@@ -473,13 +472,12 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
nvmem->priv = config->priv;
nvmem->reg_read = config->reg_read;
nvmem->reg_write = config->reg_write;
- np = config->dev->of_node;
- nvmem->dev.of_node = np;
+ nvmem->dev.of_node = config->dev->of_node;
dev_set_name(&nvmem->dev, "%s%d",
config->name ? : "nvmem",
config->name ? config->id : nvmem->id);
- nvmem->read_only = of_property_read_bool(np, "read-only") |
+ nvmem->read_only = device_property_present(config->dev, "read-only") |
config->read_only;
if (config->root_only)
@@ -600,16 +598,11 @@ static void __nvmem_device_put(struct nvmem_device *nvmem)
mutex_unlock(&nvmem_mutex);
}
-static int nvmem_match(struct device *dev, void *data)
-{
- return !strcmp(dev_name(dev), data);
-}
-
static struct nvmem_device *nvmem_find(const char *name)
{
struct device *d;
- d = bus_find_device(&nvmem_bus_type, NULL, (void *)name, nvmem_match);
+ d = bus_find_device_by_name(&nvmem_bus_type, NULL, name);
if (!d)
return NULL;
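Two idioms are at work in this hunk: "a ?: b" is the GNU C shorthand for
"a ? a : b" (with a evaluated only once), used to default stride and
word_size to 1 when the config leaves them at zero, and
bus_find_device_by_name() replaces the open-coded strcmp() match callback:

	/* same as: config->stride ? config->stride : 1 */
	nvmem->stride = config->stride ?: 1;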
diff --git a/drivers/nvmem/rockchip-efuse.c b/drivers/nvmem/rockchip-efuse.c
index 123de77ca5d6..f13a8335f364 100644
--- a/drivers/nvmem/rockchip-efuse.c
+++ b/drivers/nvmem/rockchip-efuse.c
@@ -32,6 +32,14 @@
#define RK3288_STROBE BIT(1)
#define RK3288_CSB BIT(0)
+#define RK3328_SECURE_SIZES 96
+#define RK3328_INT_STATUS 0x0018
+#define RK3328_DOUT 0x0020
+#define RK3328_AUTO_CTRL 0x0024
+#define RK3328_INT_FINISH BIT(0)
+#define RK3328_AUTO_ENB BIT(0)
+#define RK3328_AUTO_RD BIT(1)
+
#define RK3399_A_SHIFT 16
#define RK3399_A_MASK 0x3ff
#define RK3399_NBYTES 4
@@ -92,6 +100,60 @@ static int rockchip_rk3288_efuse_read(void *context, unsigned int offset,
return 0;
}
+static int rockchip_rk3328_efuse_read(void *context, unsigned int offset,
+ void *val, size_t bytes)
+{
+ struct rockchip_efuse_chip *efuse = context;
+ unsigned int addr_start, addr_end, addr_offset, addr_len;
+ u32 out_value, status;
+ u8 *buf;
+ int ret, i = 0;
+
+ ret = clk_prepare_enable(efuse->clk);
+ if (ret < 0) {
+ dev_err(efuse->dev, "failed to prepare/enable efuse clk\n");
+ return ret;
+ }
+
+	/* 128-byte efuse: 96 bytes secure, 32 bytes non-secure */
+ offset += RK3328_SECURE_SIZES;
+ addr_start = rounddown(offset, RK3399_NBYTES) / RK3399_NBYTES;
+ addr_end = roundup(offset + bytes, RK3399_NBYTES) / RK3399_NBYTES;
+ addr_offset = offset % RK3399_NBYTES;
+ addr_len = addr_end - addr_start;
+
+ buf = kzalloc(sizeof(*buf) * addr_len * RK3399_NBYTES, GFP_KERNEL);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto nomem;
+ }
+
+ while (addr_len--) {
+ writel(RK3328_AUTO_RD | RK3328_AUTO_ENB |
+ ((addr_start++ & RK3399_A_MASK) << RK3399_A_SHIFT),
+ efuse->base + RK3328_AUTO_CTRL);
+ udelay(4);
+ status = readl(efuse->base + RK3328_INT_STATUS);
+ if (!(status & RK3328_INT_FINISH)) {
+ ret = -EIO;
+ goto err;
+ }
+ out_value = readl(efuse->base + RK3328_DOUT);
+ writel(RK3328_INT_FINISH, efuse->base + RK3328_INT_STATUS);
+
+ memcpy(&buf[i], &out_value, RK3399_NBYTES);
+ i += RK3399_NBYTES;
+ }
+
+ memcpy(val, buf + addr_offset, bytes);
+err:
+ kfree(buf);
+nomem:
+ clk_disable_unprepare(efuse->clk);
+
+ return ret;
+}
+
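+/*
+ * Worked example of the windowing math above: a 6-byte read at caller
+ * offset 5 becomes offset 101 after the 96-byte secure shift, and with
+ * RK3399_NBYTES = 4:
+ *
+ *   addr_start  = rounddown(101, 4) / 4 = 25   (first word)
+ *   addr_end    = roundup(107, 4) / 4   = 27   (one past the last)
+ *   addr_offset = 101 % 4               = 1
+ *   addr_len    = 27 - 25               = 2    (words fetched)
+ *
+ * Two whole 32-bit words (8 bytes) land in buf and the caller's 6 bytes
+ * are copied out starting at buf + addr_offset.
+ */
+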
static int rockchip_rk3399_efuse_read(void *context, unsigned int offset,
void *val, size_t bytes)
{
@@ -181,6 +243,10 @@ static const struct of_device_id rockchip_efuse_match[] = {
.data = (void *)&rockchip_rk3288_efuse_read,
},
{
+ .compatible = "rockchip,rk3328-efuse",
+ .data = (void *)&rockchip_rk3328_efuse_read,
+ },
+ {
.compatible = "rockchip,rk3399-efuse",
.data = (void *)&rockchip_rk3399_efuse_read,
},
@@ -217,7 +283,9 @@ static int rockchip_efuse_probe(struct platform_device *pdev)
return PTR_ERR(efuse->clk);
efuse->dev = &pdev->dev;
- econfig.size = resource_size(res);
+ if (of_property_read_u32(dev->of_node, "rockchip,efuse-size",
+ &econfig.size))
+ econfig.size = resource_size(res);
econfig.reg_read = match->data;
econfig.priv = efuse;
econfig.dev = efuse->dev;
diff --git a/drivers/nvmem/uniphier-efuse.c b/drivers/nvmem/uniphier-efuse.c
index 9d278b4e1dc7..be11880a1358 100644
--- a/drivers/nvmem/uniphier-efuse.c
+++ b/drivers/nvmem/uniphier-efuse.c
@@ -27,11 +27,11 @@ static int uniphier_reg_read(void *context,
unsigned int reg, void *_val, size_t bytes)
{
struct uniphier_efuse_priv *priv = context;
- u32 *val = _val;
+ u8 *val = _val;
int offs;
- for (offs = 0; offs < bytes; offs += sizeof(u32))
- *val++ = readl(priv->base + reg + offs);
+ for (offs = 0; offs < bytes; offs += sizeof(u8))
+ *val++ = readb(priv->base + reg + offs);
return 0;
}
@@ -53,8 +53,8 @@ static int uniphier_efuse_probe(struct platform_device *pdev)
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
- econfig.stride = 4;
- econfig.word_size = 4;
+ econfig.stride = 1;
+ econfig.word_size = 1;
econfig.read_only = true;
econfig.reg_read = uniphier_reg_read;
econfig.size = resource_size(res);
diff --git a/drivers/siox/Kconfig b/drivers/siox/Kconfig
new file mode 100644
index 000000000000..083d2e62189a
--- /dev/null
+++ b/drivers/siox/Kconfig
@@ -0,0 +1,18 @@
+menuconfig SIOX
+ tristate "Eckelmann SIOX Support"
+ help
+ SIOX stands for Serial Input Output eXtension and is a synchronous
+ bus system invented by Eckelmann AG. It is used in their control and
+ remote monitoring systems for commercial and industrial refrigeration
+ to drive additional I/O units.
+
+ Unless you know better, it is probably safe to say "no" here.
+
+if SIOX
+
+config SIOX_BUS_GPIO
+ tristate "SIOX GPIO bus driver"
+ help
+ SIOX bus driver that controls the four bus lines using GPIOs.
+
+endif
diff --git a/drivers/siox/Makefile b/drivers/siox/Makefile
new file mode 100644
index 000000000000..a956f65206d5
--- /dev/null
+++ b/drivers/siox/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_SIOX) += siox-core.o
+obj-$(CONFIG_SIOX_BUS_GPIO) += siox-bus-gpio.o
diff --git a/drivers/siox/siox-bus-gpio.c b/drivers/siox/siox-bus-gpio.c
new file mode 100644
index 000000000000..ea7ef982968b
--- /dev/null
+++ b/drivers/siox/siox-bus-gpio.c
@@ -0,0 +1,172 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2015-2017 Pengutronix, Uwe Kleine-König <kernel@pengutronix.de>
+ */
+
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <linux/delay.h>
+
+#include "siox.h"
+
+#define DRIVER_NAME "siox-gpio"
+
+struct siox_gpio_ddata {
+ struct gpio_desc *din;
+ struct gpio_desc *dout;
+ struct gpio_desc *dclk;
+ struct gpio_desc *dld;
+};
+
+static unsigned int siox_clkhigh_ns = 1000;
+static unsigned int siox_loadhigh_ns;
+static unsigned int siox_bytegap_ns;
+
+static int siox_gpio_pushpull(struct siox_master *smaster,
+ size_t setbuf_len, const u8 setbuf[],
+ size_t getbuf_len, u8 getbuf[])
+{
+ struct siox_gpio_ddata *ddata = siox_master_get_devdata(smaster);
+ size_t i;
+ size_t cycles = max(setbuf_len, getbuf_len);
+
+ /* reset data and clock */
+ gpiod_set_value_cansleep(ddata->dout, 0);
+ gpiod_set_value_cansleep(ddata->dclk, 0);
+
+ gpiod_set_value_cansleep(ddata->dld, 1);
+ ndelay(siox_loadhigh_ns);
+ gpiod_set_value_cansleep(ddata->dld, 0);
+
+ for (i = 0; i < cycles; ++i) {
+ u8 set = 0, get = 0;
+ size_t j;
+
+ if (i >= cycles - setbuf_len)
+ set = setbuf[i - (cycles - setbuf_len)];
+
+ for (j = 0; j < 8; ++j) {
+ get <<= 1;
+ if (gpiod_get_value_cansleep(ddata->din))
+ get |= 1;
+
+ /* DOUT is logically inverted */
+ gpiod_set_value_cansleep(ddata->dout, !(set & 0x80));
+ set <<= 1;
+
+ gpiod_set_value_cansleep(ddata->dclk, 1);
+ ndelay(siox_clkhigh_ns);
+ gpiod_set_value_cansleep(ddata->dclk, 0);
+ }
+
+ if (i < getbuf_len)
+ getbuf[i] = get;
+
+ ndelay(siox_bytegap_ns);
+ }
+
+ gpiod_set_value_cansleep(ddata->dld, 1);
+ ndelay(siox_loadhigh_ns);
+ gpiod_set_value_cansleep(ddata->dld, 0);
+
+	/*
+	 * Resetting dout isn't necessary protocol-wise, but it makes the
+	 * signals cleaner because the dout level is then deterministic
+	 * between cycles. Note that this only affects dout between the
+	 * master and the first siox device; dout for the later devices
+	 * depends on the output of the previous siox device.
+	 */
+ gpiod_set_value_cansleep(ddata->dout, 0);
+
+ return 0;
+}
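+
+/*
+ * A quick check of the alignment in siox_gpio_pushpull(): with
+ * setbuf_len = 2 and getbuf_len = 4, cycles = max(2, 4) = 4. Input is
+ * captured on every cycle (getbuf[i] is filled for i < 4), while output
+ * bytes are only driven during the final setbuf_len cycles
+ * (i >= cycles - setbuf_len, i.e. i = 2 and 3), so the set data sits
+ * closest to the trailing load pulse.
+ */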
+
+static int siox_gpio_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct siox_gpio_ddata *ddata;
+ int ret;
+ struct siox_master *smaster;
+
+ smaster = siox_master_alloc(&pdev->dev, sizeof(*ddata));
+ if (!smaster) {
+ dev_err(dev, "failed to allocate siox master\n");
+ return -ENOMEM;
+ }
+
+ platform_set_drvdata(pdev, smaster);
+ ddata = siox_master_get_devdata(smaster);
+
+ ddata->din = devm_gpiod_get(dev, "din", GPIOD_IN);
+ if (IS_ERR(ddata->din)) {
+ ret = PTR_ERR(ddata->din);
+ dev_err(dev, "Failed to get %s GPIO: %d\n", "din", ret);
+ goto err;
+ }
+
+ ddata->dout = devm_gpiod_get(dev, "dout", GPIOD_OUT_LOW);
+ if (IS_ERR(ddata->dout)) {
+ ret = PTR_ERR(ddata->dout);
+ dev_err(dev, "Failed to get %s GPIO: %d\n", "dout", ret);
+ goto err;
+ }
+
+ ddata->dclk = devm_gpiod_get(dev, "dclk", GPIOD_OUT_LOW);
+ if (IS_ERR(ddata->dclk)) {
+ ret = PTR_ERR(ddata->dclk);
+ dev_err(dev, "Failed to get %s GPIO: %d\n", "dclk", ret);
+ goto err;
+ }
+
+ ddata->dld = devm_gpiod_get(dev, "dld", GPIOD_OUT_LOW);
+ if (IS_ERR(ddata->dld)) {
+ ret = PTR_ERR(ddata->dld);
+ dev_err(dev, "Failed to get %s GPIO: %d\n", "dld", ret);
+ goto err;
+ }
+
+ smaster->pushpull = siox_gpio_pushpull;
+ /* XXX: determine automatically like spi does */
+ smaster->busno = 0;
+
+ ret = siox_master_register(smaster);
+ if (ret) {
+ dev_err(dev, "Failed to register siox master: %d\n", ret);
+err:
+ siox_master_put(smaster);
+ }
+
+ return ret;
+}
+
+static int siox_gpio_remove(struct platform_device *pdev)
+{
+ struct siox_master *master = platform_get_drvdata(pdev);
+
+ siox_master_unregister(master);
+
+ return 0;
+}
+
+static const struct of_device_id siox_gpio_dt_ids[] = {
+ { .compatible = "eckelmann,siox-gpio", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, siox_gpio_dt_ids);
+
+static struct platform_driver siox_gpio_driver = {
+ .probe = siox_gpio_probe,
+ .remove = siox_gpio_remove,
+
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = siox_gpio_dt_ids,
+ },
+};
+module_platform_driver(siox_gpio_driver);
+
+MODULE_AUTHOR("Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/siox/siox-core.c b/drivers/siox/siox-core.c
new file mode 100644
index 000000000000..fdfcdea25867
--- /dev/null
+++ b/drivers/siox/siox-core.c
@@ -0,0 +1,934 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2015-2017 Pengutronix, Uwe Kleine-König <kernel@pengutronix.de>
+ */
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+
+#include "siox.h"
+
+/*
+ * The lowest bit in the SIOX status word signals if the in-device watchdog is
+ * ok. If the bit is set, the device is functional.
+ *
+ * On writing the watchdog timer is reset when this bit toggles.
+ */
+#define SIOX_STATUS_WDG 0x01
+
+/*
+ * Bits 1 to 3 of the status word read as the bitwise negation of what was
+ * clocked in before. The value clocked in is changed in each cycle and so
+ * allows to detect transmit/receive problems.
+ */
+#define SIOX_STATUS_COUNTER 0x0e
+
+/*
+ * Each SIOX device has a 4-bit type number that is neither 0 nor 15. It is
+ * available in the upper nibble of the read status.
+ *
+ * On write these bits are don't-care.
+ */
+#define SIOX_STATUS_TYPE 0xf0
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/siox.h>
+
+static bool siox_is_registered;
+
+static void siox_master_lock(struct siox_master *smaster)
+{
+ mutex_lock(&smaster->lock);
+}
+
+static void siox_master_unlock(struct siox_master *smaster)
+{
+ mutex_unlock(&smaster->lock);
+}
+
+static inline u8 siox_status_clean(u8 status_read, u8 status_written)
+{
+ /*
+ * bits 3:1 of status sample the respective bit in the status
+ * byte written in the previous cycle but inverted. So if you wrote the
+ * status word as 0xa before (counter = 0b101), it is expected to get
+ * back the counter bits as 0b010.
+ *
+	 * So, given the last status written, this function toggles the
+	 * counter bits that were unset in it, so that the counter bits in
+	 * the return value are all zero iff the bits were read back as
+	 * expected. This simplifies error detection.
+ */
+
+ return status_read ^ (~status_written & 0xe);
+}
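+
+/*
+ * Worked example with the numbers from the comment above: status_written
+ * = 0xa has counter bits 0b101, so a correctly echoing device returns
+ * the inversion 0b010 in bits 3:1:
+ *
+ *   ~0xa & 0xe        = 0b0100
+ *   status_read & 0xe = 0b0100   (correct echo)
+ *   cleaned counter   = 0b0100 ^ 0b0100 = 0
+ *
+ * Any transmission problem leaves at least one of bits 3:1 set in the
+ * cleaned value, which siox_device_counter_error() then reports.
+ */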
+
+static bool siox_device_counter_error(struct siox_device *sdevice,
+ u8 status_clean)
+{
+ return (status_clean & SIOX_STATUS_COUNTER) != 0;
+}
+
+static bool siox_device_type_error(struct siox_device *sdevice, u8 status_clean)
+{
+ u8 statustype = (status_clean & SIOX_STATUS_TYPE) >> 4;
+
+ /*
+ * If the device knows which value the type bits should have, check
+ * against this value otherwise just rule out the invalid values 0b0000
+ * and 0b1111.
+ */
+ if (sdevice->statustype) {
+ if (statustype != sdevice->statustype)
+ return true;
+ } else {
+ switch (statustype) {
+ case 0:
+ case 0xf:
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static bool siox_device_wdg_error(struct siox_device *sdevice, u8 status_clean)
+{
+ return (status_clean & SIOX_STATUS_WDG) == 0;
+}
+
+/*
+ * If there is a type or counter error the device is called "unsynced".
+ */
+bool siox_device_synced(struct siox_device *sdevice)
+{
+ if (siox_device_type_error(sdevice, sdevice->status_read_clean))
+ return false;
+
+	return !siox_device_counter_error(sdevice, sdevice->status_read_clean);
+}
+EXPORT_SYMBOL_GPL(siox_device_synced);
+
+/*
+ * A device is called "connected" if it is synced and the watchdog is not
+ * asserted.
+ */
+bool siox_device_connected(struct siox_device *sdevice)
+{
+ if (!siox_device_synced(sdevice))
+ return false;
+
+ return !siox_device_wdg_error(sdevice, sdevice->status_read_clean);
+}
+EXPORT_SYMBOL_GPL(siox_device_connected);
+
+static void siox_poll(struct siox_master *smaster)
+{
+ struct siox_device *sdevice;
+ size_t i = smaster->setbuf_len;
+ unsigned int devno = 0;
+ int unsync_error = 0;
+
+ smaster->last_poll = jiffies;
+
+ /*
+	 * The counter bits change every second cycle, while the watchdog
+	 * bit toggles on each cycle.
+	 * The counter bits hold values from [0, 6]. 7 would be possible
+	 * theoretically, but the protocol designer considered that a bad idea
+	 * for reasons unknown today. (Maybe that's because the status read
+	 * back would then have only zeros in the counter bits, which might be
+	 * confused with a stuck-at-0 error. But for the same reason (with
+	 * s/0/1/) 0 could be skipped.)
+ */
+ if (++smaster->status > 0x0d)
+ smaster->status = 0;
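+
+	/*
+	 * Concretely: smaster->status runs 0, 1, ..., 0x0d and wraps, so
+	 * bit 0 (the watchdog bit) toggles on every poll while bits 3:1
+	 * (the counter, i.e. status >> 1) advance every second poll
+	 * through the values 0..6 -- which is why 7 never occurs.
+	 */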
+
+ memset(smaster->buf, 0, smaster->setbuf_len);
+
+ /* prepare data pushed out to devices in buf[0..setbuf_len) */
+ list_for_each_entry(sdevice, &smaster->devices, node) {
+ struct siox_driver *sdriver =
+ to_siox_driver(sdevice->dev.driver);
+ sdevice->status_written = smaster->status;
+
+ i -= sdevice->inbytes;
+
+ /*
+ * If the device or a previous one is unsynced, don't pet the
+ * watchdog. This is done to ensure that the device is kept in
+ * reset when something is wrong.
+ */
+ if (!siox_device_synced(sdevice))
+ unsync_error = 1;
+
+ if (sdriver && !unsync_error)
+ sdriver->set_data(sdevice, sdevice->status_written,
+ &smaster->buf[i + 1]);
+ else
+ /*
+ * Don't trigger watchdog if there is no driver or a
+ * sync problem
+ */
+ sdevice->status_written &= ~SIOX_STATUS_WDG;
+
+ smaster->buf[i] = sdevice->status_written;
+
+ trace_siox_set_data(smaster, sdevice, devno, i);
+
+ devno++;
+ }
+
+ smaster->pushpull(smaster, smaster->setbuf_len, smaster->buf,
+ smaster->getbuf_len,
+ smaster->buf + smaster->setbuf_len);
+
+ unsync_error = 0;
+
+ /* interpret data pulled in from devices in buf[setbuf_len..] */
+ devno = 0;
+ i = smaster->setbuf_len;
+ list_for_each_entry(sdevice, &smaster->devices, node) {
+ struct siox_driver *sdriver =
+ to_siox_driver(sdevice->dev.driver);
+ u8 status = smaster->buf[i + sdevice->outbytes - 1];
+ u8 status_clean;
+ u8 prev_status_clean = sdevice->status_read_clean;
+ bool synced = true;
+ bool connected = true;
+
+ if (!siox_device_synced(sdevice))
+ unsync_error = 1;
+
+ /*
+ * If the watchdog bit wasn't toggled in this cycle, report the
+ * watchdog as active to give a consistent view for drivers and
+ * sysfs consumers.
+ */
+ if (!sdriver || unsync_error)
+ status &= ~SIOX_STATUS_WDG;
+
+ status_clean =
+ siox_status_clean(status,
+ sdevice->status_written_lastcycle);
+
+ /* Check counter bits */
+ if (siox_device_counter_error(sdevice, status_clean)) {
+ bool prev_counter_error;
+
+ synced = false;
+
+ /* only report a new error if the last cycle was ok */
+ prev_counter_error =
+ siox_device_counter_error(sdevice,
+ prev_status_clean);
+ if (!prev_counter_error) {
+ sdevice->status_errors++;
+ sysfs_notify_dirent(sdevice->status_errors_kn);
+ }
+ }
+
+ /* Check type bits */
+ if (siox_device_type_error(sdevice, status_clean))
+ synced = false;
+
+ /* If the device is unsynced report the watchdog as active */
+ if (!synced) {
+ status &= ~SIOX_STATUS_WDG;
+ status_clean &= ~SIOX_STATUS_WDG;
+ }
+
+ if (siox_device_wdg_error(sdevice, status_clean))
+ connected = false;
+
+ /* The watchdog state changed just now */
+ if ((status_clean ^ prev_status_clean) & SIOX_STATUS_WDG) {
+ sysfs_notify_dirent(sdevice->watchdog_kn);
+
+ if (siox_device_wdg_error(sdevice, status_clean)) {
+ struct kernfs_node *wd_errs =
+ sdevice->watchdog_errors_kn;
+
+ sdevice->watchdog_errors++;
+ sysfs_notify_dirent(wd_errs);
+ }
+ }
+
+ if (connected != sdevice->connected)
+ sysfs_notify_dirent(sdevice->connected_kn);
+
+ sdevice->status_read_clean = status_clean;
+ sdevice->status_written_lastcycle = sdevice->status_written;
+ sdevice->connected = connected;
+
+ trace_siox_get_data(smaster, sdevice, devno, status_clean, i);
+
+ /* only give data read to driver if the device is connected */
+ if (sdriver && connected)
+ sdriver->get_data(sdevice, &smaster->buf[i]);
+
+ devno++;
+ i += sdevice->outbytes;
+ }
+}
+
+static int siox_poll_thread(void *data)
+{
+ struct siox_master *smaster = data;
+ signed long timeout = 0;
+
+ get_device(&smaster->dev);
+
+ for (;;) {
+ if (kthread_should_stop()) {
+ put_device(&smaster->dev);
+ return 0;
+ }
+
+ siox_master_lock(smaster);
+
+ if (smaster->active) {
+ unsigned long next_poll =
+ smaster->last_poll + smaster->poll_interval;
+ if (time_is_before_eq_jiffies(next_poll))
+ siox_poll(smaster);
+
+ timeout = smaster->poll_interval -
+ (jiffies - smaster->last_poll);
+ } else {
+ timeout = MAX_SCHEDULE_TIMEOUT;
+ }
+
+ /*
+ * Set the task to idle while holding the lock. This makes sure
+ * that we don't sleep too long when the bus is reenabled before
+ * schedule_timeout is reached.
+ */
+ if (timeout > 0)
+ set_current_state(TASK_IDLE);
+
+ siox_master_unlock(smaster);
+
+ if (timeout > 0)
+ schedule_timeout(timeout);
+
+ /*
+ * I'm not clear if/why it is important to set the state to
+ * RUNNING again, but it fixes a "do not call blocking ops when
+ * !TASK_RUNNING;"-warning.
+ */
+ set_current_state(TASK_RUNNING);
+ }
+}
+
+static int __siox_start(struct siox_master *smaster)
+{
+ if (!(smaster->setbuf_len + smaster->getbuf_len))
+ return -ENODEV;
+
+ if (!smaster->buf)
+ return -ENOMEM;
+
+ if (smaster->active)
+ return 0;
+
+ smaster->active = 1;
+ wake_up_process(smaster->poll_thread);
+
+ return 1;
+}
+
+static int siox_start(struct siox_master *smaster)
+{
+ int ret;
+
+ siox_master_lock(smaster);
+ ret = __siox_start(smaster);
+ siox_master_unlock(smaster);
+
+ return ret;
+}
+
+static int __siox_stop(struct siox_master *smaster)
+{
+ if (smaster->active) {
+ struct siox_device *sdevice;
+
+ smaster->active = 0;
+
+ list_for_each_entry(sdevice, &smaster->devices, node) {
+ if (sdevice->connected)
+ sysfs_notify_dirent(sdevice->connected_kn);
+ sdevice->connected = false;
+ }
+
+ return 1;
+ }
+ return 0;
+}
+
+static int siox_stop(struct siox_master *smaster)
+{
+ int ret;
+
+ siox_master_lock(smaster);
+ ret = __siox_stop(smaster);
+ siox_master_unlock(smaster);
+
+ return ret;
+}
+
+static ssize_t type_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct siox_device *sdev = to_siox_device(dev);
+
+ return sprintf(buf, "%s\n", sdev->type);
+}
+
+static DEVICE_ATTR_RO(type);
+
+static ssize_t inbytes_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct siox_device *sdev = to_siox_device(dev);
+
+ return sprintf(buf, "%zu\n", sdev->inbytes);
+}
+
+static DEVICE_ATTR_RO(inbytes);
+
+static ssize_t outbytes_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct siox_device *sdev = to_siox_device(dev);
+
+ return sprintf(buf, "%zu\n", sdev->outbytes);
+}
+
+static DEVICE_ATTR_RO(outbytes);
+
+static ssize_t status_errors_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct siox_device *sdev = to_siox_device(dev);
+ unsigned int status_errors;
+
+ siox_master_lock(sdev->smaster);
+
+ status_errors = sdev->status_errors;
+
+ siox_master_unlock(sdev->smaster);
+
+ return sprintf(buf, "%u\n", status_errors);
+}
+
+static DEVICE_ATTR_RO(status_errors);
+
+static ssize_t connected_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct siox_device *sdev = to_siox_device(dev);
+ bool connected;
+
+ siox_master_lock(sdev->smaster);
+
+ connected = sdev->connected;
+
+ siox_master_unlock(sdev->smaster);
+
+ return sprintf(buf, "%u\n", connected);
+}
+
+static DEVICE_ATTR_RO(connected);
+
+static ssize_t watchdog_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct siox_device *sdev = to_siox_device(dev);
+ u8 status;
+
+ siox_master_lock(sdev->smaster);
+
+ status = sdev->status_read_clean;
+
+ siox_master_unlock(sdev->smaster);
+
+ return sprintf(buf, "%d\n", status & SIOX_STATUS_WDG);
+}
+
+static DEVICE_ATTR_RO(watchdog);
+
+static ssize_t watchdog_errors_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct siox_device *sdev = to_siox_device(dev);
+ unsigned int watchdog_errors;
+
+ siox_master_lock(sdev->smaster);
+
+ watchdog_errors = sdev->watchdog_errors;
+
+ siox_master_unlock(sdev->smaster);
+
+ return sprintf(buf, "%u\n", watchdog_errors);
+}
+
+static DEVICE_ATTR_RO(watchdog_errors);
+
+static struct attribute *siox_device_attrs[] = {
+ &dev_attr_type.attr,
+ &dev_attr_inbytes.attr,
+ &dev_attr_outbytes.attr,
+ &dev_attr_status_errors.attr,
+ &dev_attr_connected.attr,
+ &dev_attr_watchdog.attr,
+ &dev_attr_watchdog_errors.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(siox_device);
+
+static void siox_device_release(struct device *dev)
+{
+ struct siox_device *sdevice = to_siox_device(dev);
+
+ kfree(sdevice);
+}
+
+static struct device_type siox_device_type = {
+ .groups = siox_device_groups,
+ .release = siox_device_release,
+};
+
+static int siox_match(struct device *dev, struct device_driver *drv)
+{
+ if (dev->type != &siox_device_type)
+ return 0;
+
+ /* up to now there is only a single driver so keeping this simple */
+ return 1;
+}
+
+static struct bus_type siox_bus_type = {
+ .name = "siox",
+ .match = siox_match,
+};
+
+static int siox_driver_probe(struct device *dev)
+{
+ struct siox_driver *sdriver = to_siox_driver(dev->driver);
+ struct siox_device *sdevice = to_siox_device(dev);
+ int ret;
+
+ ret = sdriver->probe(sdevice);
+ return ret;
+}
+
+static int siox_driver_remove(struct device *dev)
+{
+ struct siox_driver *sdriver =
+ container_of(dev->driver, struct siox_driver, driver);
+ struct siox_device *sdevice = to_siox_device(dev);
+ int ret;
+
+ ret = sdriver->remove(sdevice);
+ return ret;
+}
+
+static void siox_driver_shutdown(struct device *dev)
+{
+ struct siox_driver *sdriver =
+ container_of(dev->driver, struct siox_driver, driver);
+ struct siox_device *sdevice = to_siox_device(dev);
+
+ sdriver->shutdown(sdevice);
+}
+
+static ssize_t active_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct siox_master *smaster = to_siox_master(dev);
+
+ return sprintf(buf, "%d\n", smaster->active);
+}
+
+static ssize_t active_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct siox_master *smaster = to_siox_master(dev);
+ int ret;
+ int active;
+
+ ret = kstrtoint(buf, 0, &active);
+ if (ret < 0)
+ return ret;
+
+ if (active)
+ ret = siox_start(smaster);
+ else
+ ret = siox_stop(smaster);
+
+ if (ret < 0)
+ return ret;
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(active);
+
+static struct siox_device *siox_device_add(struct siox_master *smaster,
+ const char *type, size_t inbytes,
+ size_t outbytes, u8 statustype);
+
+static ssize_t device_add_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct siox_master *smaster = to_siox_master(dev);
+ int ret;
+ char type[20] = "";
+ size_t inbytes = 0, outbytes = 0;
+ u8 statustype = 0;
+
+ ret = sscanf(buf, "%20s %zu %zu %hhu", type, &inbytes,
+ &outbytes, &statustype);
+ if (ret != 3 && ret != 4)
+ return -EINVAL;
+
+ if (strcmp(type, "siox-12x8") || inbytes != 2 || outbytes != 4)
+ return -EINVAL;
+
+ siox_device_add(smaster, "siox-12x8", inbytes, outbytes, statustype);
+
+ return count;
+}
+
+static DEVICE_ATTR_WO(device_add);
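+
+/*
+ * Usage sketch (the path assumes a master registered as siox-0): writing
+ * "siox-12x8 2 4" (optionally followed by a statustype) to
+ * /sys/bus/siox/devices/siox-0/device_add instantiates a device; the
+ * parser above only accepts the siox-12x8 type with 2 inbytes and
+ * 4 outbytes.
+ */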
+
+static void siox_device_remove(struct siox_master *smaster);
+
+static ssize_t device_remove_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct siox_master *smaster = to_siox_master(dev);
+
+ /* XXX? require to write <type> <inbytes> <outbytes> */
+ siox_device_remove(smaster);
+
+ return count;
+}
+
+static DEVICE_ATTR_WO(device_remove);
+
+static ssize_t poll_interval_ns_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct siox_master *smaster = to_siox_master(dev);
+
+ return sprintf(buf, "%lld\n", jiffies_to_nsecs(smaster->poll_interval));
+}
+
+static ssize_t poll_interval_ns_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct siox_master *smaster = to_siox_master(dev);
+ int ret;
+ u64 val;
+
+ ret = kstrtou64(buf, 0, &val);
+ if (ret < 0)
+ return ret;
+
+ siox_master_lock(smaster);
+
+ smaster->poll_interval = nsecs_to_jiffies(val);
+
+ siox_master_unlock(smaster);
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(poll_interval_ns);
+
+static struct attribute *siox_master_attrs[] = {
+ &dev_attr_active.attr,
+ &dev_attr_device_add.attr,
+ &dev_attr_device_remove.attr,
+ &dev_attr_poll_interval_ns.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(siox_master);
+
+static void siox_master_release(struct device *dev)
+{
+ struct siox_master *smaster = to_siox_master(dev);
+
+ kfree(smaster);
+}
+
+static struct device_type siox_master_type = {
+ .groups = siox_master_groups,
+ .release = siox_master_release,
+};
+
+struct siox_master *siox_master_alloc(struct device *dev,
+ size_t size)
+{
+ struct siox_master *smaster;
+
+ if (!dev)
+ return NULL;
+
+ smaster = kzalloc(sizeof(*smaster) + size, GFP_KERNEL);
+ if (!smaster)
+ return NULL;
+
+ device_initialize(&smaster->dev);
+
+ smaster->busno = -1;
+ smaster->dev.bus = &siox_bus_type;
+ smaster->dev.type = &siox_master_type;
+ smaster->dev.parent = dev;
+ smaster->poll_interval = DIV_ROUND_UP(HZ, 40);
+
+ dev_set_drvdata(&smaster->dev, &smaster[1]);
+
+ return smaster;
+}
+EXPORT_SYMBOL_GPL(siox_master_alloc);
+
+int siox_master_register(struct siox_master *smaster)
+{
+ int ret;
+
+ if (!siox_is_registered)
+ return -EPROBE_DEFER;
+
+ if (!smaster->pushpull)
+ return -EINVAL;
+
+ dev_set_name(&smaster->dev, "siox-%d", smaster->busno);
+
+ smaster->last_poll = jiffies;
+ smaster->poll_thread = kthread_create(siox_poll_thread, smaster,
+ "siox-%d", smaster->busno);
+ if (IS_ERR(smaster->poll_thread)) {
+ smaster->active = false;
+ return PTR_ERR(smaster->poll_thread);
+ }
+
+ mutex_init(&smaster->lock);
+ INIT_LIST_HEAD(&smaster->devices);
+
+ ret = device_add(&smaster->dev);
+ if (ret)
+ kthread_stop(smaster->poll_thread);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(siox_master_register);
+
+void siox_master_unregister(struct siox_master *smaster)
+{
+ /* remove device */
+ device_del(&smaster->dev);
+
+ siox_master_lock(smaster);
+
+ __siox_stop(smaster);
+
+ while (smaster->num_devices) {
+ struct siox_device *sdevice;
+
+ sdevice = container_of(smaster->devices.prev,
+ struct siox_device, node);
+ list_del(&sdevice->node);
+ smaster->num_devices--;
+
+ siox_master_unlock(smaster);
+
+ device_unregister(&sdevice->dev);
+
+ siox_master_lock(smaster);
+ }
+
+ siox_master_unlock(smaster);
+
+ put_device(&smaster->dev);
+}
+EXPORT_SYMBOL_GPL(siox_master_unregister);
+
+static struct siox_device *siox_device_add(struct siox_master *smaster,
+ const char *type, size_t inbytes,
+ size_t outbytes, u8 statustype)
+{
+ struct siox_device *sdevice;
+ int ret;
+ size_t buf_len;
+
+ sdevice = kzalloc(sizeof(*sdevice), GFP_KERNEL);
+ if (!sdevice)
+ return ERR_PTR(-ENOMEM);
+
+ sdevice->type = type;
+ sdevice->inbytes = inbytes;
+ sdevice->outbytes = outbytes;
+ sdevice->statustype = statustype;
+
+ sdevice->smaster = smaster;
+ sdevice->dev.parent = &smaster->dev;
+ sdevice->dev.bus = &siox_bus_type;
+ sdevice->dev.type = &siox_device_type;
+
+ siox_master_lock(smaster);
+
+ dev_set_name(&sdevice->dev, "siox-%d-%d",
+ smaster->busno, smaster->num_devices);
+
+ buf_len = smaster->setbuf_len + inbytes +
+ smaster->getbuf_len + outbytes;
+ if (smaster->buf_len < buf_len) {
+ u8 *buf = krealloc(smaster->buf, buf_len, GFP_KERNEL);
+
+ if (!buf) {
+ dev_err(&smaster->dev,
+ "failed to realloc buffer to %zu\n", buf_len);
+ ret = -ENOMEM;
+ goto err_buf_alloc;
+ }
+
+ smaster->buf_len = buf_len;
+ smaster->buf = buf;
+ }
+
+ ret = device_register(&sdevice->dev);
+ if (ret) {
+ dev_err(&smaster->dev, "failed to register device: %d\n", ret);
+
+ goto err_device_register;
+ }
+
+ smaster->num_devices++;
+ list_add_tail(&sdevice->node, &smaster->devices);
+
+ smaster->setbuf_len += sdevice->inbytes;
+ smaster->getbuf_len += sdevice->outbytes;
+
+ sdevice->status_errors_kn = sysfs_get_dirent(sdevice->dev.kobj.sd,
+ "status_errors");
+ sdevice->watchdog_kn = sysfs_get_dirent(sdevice->dev.kobj.sd,
+ "watchdog");
+ sdevice->watchdog_errors_kn = sysfs_get_dirent(sdevice->dev.kobj.sd,
+ "watchdog_errors");
+ sdevice->connected_kn = sysfs_get_dirent(sdevice->dev.kobj.sd,
+ "connected");
+
+ siox_master_unlock(smaster);
+
+ return sdevice;
+
+err_device_register:
+ /* don't care to make the buffer smaller again */
+
+err_buf_alloc:
+ siox_master_unlock(smaster);
+
+ kfree(sdevice);
+
+ return ERR_PTR(ret);
+}
+
+static void siox_device_remove(struct siox_master *smaster)
+{
+ struct siox_device *sdevice;
+
+ siox_master_lock(smaster);
+
+ if (!smaster->num_devices) {
+ siox_master_unlock(smaster);
+ return;
+ }
+
+ sdevice = container_of(smaster->devices.prev, struct siox_device, node);
+ list_del(&sdevice->node);
+ smaster->num_devices--;
+
+ smaster->setbuf_len -= sdevice->inbytes;
+ smaster->getbuf_len -= sdevice->outbytes;
+
+ if (!smaster->num_devices)
+ __siox_stop(smaster);
+
+ siox_master_unlock(smaster);
+
+ /*
+ * This must be done without holding the master lock because we're
+ * called from device_remove_store which also holds a sysfs mutex.
+ * device_unregister tries to acquire the same lock.
+ */
+ device_unregister(&sdevice->dev);
+}
+
+int __siox_driver_register(struct siox_driver *sdriver, struct module *owner)
+{
+ int ret;
+
+ if (unlikely(!siox_is_registered))
+ return -EPROBE_DEFER;
+
+ if (!sdriver->set_data && !sdriver->get_data) {
+ pr_err("Driver %s doesn't provide needed callbacks\n",
+ sdriver->driver.name);
+ return -EINVAL;
+ }
+
+ sdriver->driver.owner = owner;
+ sdriver->driver.bus = &siox_bus_type;
+
+ if (sdriver->probe)
+ sdriver->driver.probe = siox_driver_probe;
+ if (sdriver->remove)
+ sdriver->driver.remove = siox_driver_remove;
+ if (sdriver->shutdown)
+ sdriver->driver.shutdown = siox_driver_shutdown;
+
+ ret = driver_register(&sdriver->driver);
+ if (ret)
+ pr_err("Failed to register siox driver %s (%d)\n",
+ sdriver->driver.name, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(__siox_driver_register);
+
+static int __init siox_init(void)
+{
+ int ret;
+
+ ret = bus_register(&siox_bus_type);
+ if (ret) {
+ pr_err("Registration of SIOX bus type failed: %d\n", ret);
+ return ret;
+ }
+
+ siox_is_registered = true;
+
+ return 0;
+}
+subsys_initcall(siox_init);
+
+static void __exit siox_exit(void)
+{
+ bus_unregister(&siox_bus_type);
+}
+module_exit(siox_exit);
+
+MODULE_AUTHOR("Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>");
+MODULE_DESCRIPTION("Eckelmann SIOX driver core");
+MODULE_LICENSE("GPL v2");
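To see how these exports are meant to be consumed, here is a minimal sketch of
a SIOX client driver. All foo_* names are hypothetical, and the set_data()/
get_data() prototypes plus the siox_driver_unregister() helper are assumed
from the public <linux/siox.h> header, which is not part of this file:

	#include <linux/module.h>
	#include <linux/siox.h>

	/* fill buf with the bytes to shift out in the next bus cycle */
	static int foo_set_data(struct siox_device *sdevice, u8 status, u8 buf[])
	{
		buf[0] = 0xaa;
		return 0;
	}

	/* consume the bytes that were shifted in during the last cycle */
	static int foo_get_data(struct siox_device *sdevice, const u8 buf[])
	{
		dev_dbg(&sdevice->dev, "sampled 0x%02x\n", buf[0]);
		return 0;
	}

	static struct siox_driver foo_driver = {
		.set_data = foo_set_data,
		.get_data = foo_get_data,
		.driver = {
			.name = "foo",
		},
	};

	static int __init foo_init(void)
	{
		return __siox_driver_register(&foo_driver, THIS_MODULE);
	}
	module_init(foo_init);

	static void __exit foo_exit(void)
	{
		siox_driver_unregister(&foo_driver);
	}
	module_exit(foo_exit);

	MODULE_LICENSE("GPL v2");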
diff --git a/drivers/siox/siox.h b/drivers/siox/siox.h
new file mode 100644
index 000000000000..c674bf6fb119
--- /dev/null
+++ b/drivers/siox/siox.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2015-2017 Pengutronix, Uwe Kleine-König <kernel@pengutronix.de>
+ */
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/siox.h>
+
+#define to_siox_master(_dev) container_of((_dev), struct siox_master, dev)
+struct siox_master {
+ /* these fields should be initialized by the driver */
+ int busno;
+ int (*pushpull)(struct siox_master *smaster,
+ size_t setbuf_len, const u8 setbuf[],
+ size_t getbuf_len, u8 getbuf[]);
+
+ /* might be initialized by the driver, if 0 it is set to HZ / 40 */
+ unsigned long poll_interval; /* in jiffies */
+
+ /* framework private stuff */
+ struct mutex lock;
+ bool active;
+ struct module *owner;
+ struct device dev;
+ unsigned int num_devices;
+ struct list_head devices;
+
+ size_t setbuf_len, getbuf_len;
+ size_t buf_len;
+ u8 *buf;
+ u8 status;
+
+ unsigned long last_poll;
+ struct task_struct *poll_thread;
+};
+
+static inline void *siox_master_get_devdata(struct siox_master *smaster)
+{
+ return dev_get_drvdata(&smaster->dev);
+}
+
+struct siox_master *siox_master_alloc(struct device *dev, size_t size);
+static inline void siox_master_put(struct siox_master *smaster)
+{
+ put_device(&smaster->dev);
+}
+
+int siox_master_register(struct siox_master *smaster);
+void siox_master_unregister(struct siox_master *smaster);
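On the master side, a bus driver is expected to allocate its private state via
siox_master_alloc(), fill in busno and pushpull(), and then register. A
minimal sketch under the same assumptions (foo_* names invented, the actual
bus access elided):

	#include <linux/module.h>
	#include <linux/platform_device.h>
	#include "siox.h"

	struct foo_master {
		/* e.g. GPIO descriptors used to bit-bang the bus */
	};

	/* shift out setbuf and shift in getbuf in one bus cycle */
	static int foo_pushpull(struct siox_master *smaster,
				size_t setbuf_len, const u8 setbuf[],
				size_t getbuf_len, u8 getbuf[])
	{
		struct foo_master *fmaster = siox_master_get_devdata(smaster);

		/* ... drive the bus lines using fmaster's resources ... */
		return 0;
	}

	static int foo_probe(struct platform_device *pdev)
	{
		struct siox_master *smaster;
		int ret;

		smaster = siox_master_alloc(&pdev->dev,
					    sizeof(struct foo_master));
		if (!smaster)
			return -ENOMEM;

		smaster->busno = 0;
		smaster->pushpull = foo_pushpull;

		ret = siox_master_register(smaster);
		if (ret)
			siox_master_put(smaster);

		return ret;
	}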
diff --git a/drivers/slimbus/Kconfig b/drivers/slimbus/Kconfig
new file mode 100644
index 000000000000..1a632fad597e
--- /dev/null
+++ b/drivers/slimbus/Kconfig
@@ -0,0 +1,24 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# SLIMbus driver configuration
+#
+menuconfig SLIMBUS
+ tristate "SLIMbus support"
+ help
+ SLIMbus is a standard interface between a System-on-Chip and audio
+ codecs or other peripheral components in typical embedded systems.
+
+ If unsure, choose N.
+
+if SLIMBUS
+
+# SLIMbus controllers
+config SLIM_QCOM_CTRL
+ tristate "Qualcomm SLIMbus Manager Component"
+ depends on SLIMBUS
+ depends on HAS_IOMEM
+ help
+ Select this option if Qualcomm's SLIMbus Manager Component is
+ programmed using the Linux kernel.
+
+endif
diff --git a/drivers/slimbus/Makefile b/drivers/slimbus/Makefile
new file mode 100644
index 000000000000..a35a3da4eb78
--- /dev/null
+++ b/drivers/slimbus/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for kernel SLIMbus framework.
+#
+obj-$(CONFIG_SLIMBUS) += slimbus.o
+slimbus-y := core.o messaging.o sched.o
+
+#Controllers
+obj-$(CONFIG_SLIM_QCOM_CTRL) += slim-qcom-ctrl.o
+slim-qcom-ctrl-y := qcom-ctrl.o
diff --git a/drivers/slimbus/core.c b/drivers/slimbus/core.c
new file mode 100644
index 000000000000..4988a8f4d905
--- /dev/null
+++ b/drivers/slimbus/core.c
@@ -0,0 +1,480 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2011-2017, The Linux Foundation
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/idr.h>
+#include <linux/of.h>
+#include <linux/pm_runtime.h>
+#include <linux/slimbus.h>
+#include "slimbus.h"
+
+static DEFINE_IDA(ctrl_ida);
+
+static const struct slim_device_id *slim_match(const struct slim_device_id *id,
+ const struct slim_device *sbdev)
+{
+ while (id->manf_id != 0 || id->prod_code != 0) {
+ if (id->manf_id == sbdev->e_addr.manf_id &&
+ id->prod_code == sbdev->e_addr.prod_code)
+ return id;
+ id++;
+ }
+ return NULL;
+}
+
+static int slim_device_match(struct device *dev, struct device_driver *drv)
+{
+ struct slim_device *sbdev = to_slim_device(dev);
+ struct slim_driver *sbdrv = to_slim_driver(drv);
+
+ return !!slim_match(sbdrv->id_table, sbdev);
+}
+
+static int slim_device_probe(struct device *dev)
+{
+ struct slim_device *sbdev = to_slim_device(dev);
+ struct slim_driver *sbdrv = to_slim_driver(dev->driver);
+
+ return sbdrv->probe(sbdev);
+}
+
+static int slim_device_remove(struct device *dev)
+{
+ struct slim_device *sbdev = to_slim_device(dev);
+ struct slim_driver *sbdrv;
+
+ if (dev->driver) {
+ sbdrv = to_slim_driver(dev->driver);
+ if (sbdrv->remove)
+ sbdrv->remove(sbdev);
+ }
+
+ return 0;
+}
+
+struct bus_type slimbus_bus = {
+ .name = "slimbus",
+ .match = slim_device_match,
+ .probe = slim_device_probe,
+ .remove = slim_device_remove,
+};
+EXPORT_SYMBOL_GPL(slimbus_bus);
+
+/**
+ * __slim_driver_register() - Client driver registration with SLIMbus
+ *
+ * @drv: Client driver to be associated with client-device.
+ * @owner: owning module/driver
+ *
+ * This API registers the client driver with the SLIMbus framework.
+ * It is called from the driver's module-init function.
+ */
+int __slim_driver_register(struct slim_driver *drv, struct module *owner)
+{
+ /* ID table and probe are mandatory */
+ if (!drv->id_table || !drv->probe)
+ return -EINVAL;
+
+ drv->driver.bus = &slimbus_bus;
+ drv->driver.owner = owner;
+
+ return driver_register(&drv->driver);
+}
+EXPORT_SYMBOL_GPL(__slim_driver_register);
+
+/**
+ * slim_driver_unregister() - Undo effect of slim_driver_register
+ *
+ * @drv: Client driver to be unregistered
+ */
+void slim_driver_unregister(struct slim_driver *drv)
+{
+ driver_unregister(&drv->driver);
+}
+EXPORT_SYMBOL_GPL(slim_driver_unregister);
+
+static void slim_dev_release(struct device *dev)
+{
+ struct slim_device *sbdev = to_slim_device(dev);
+
+ kfree(sbdev);
+}
+
+static int slim_add_device(struct slim_controller *ctrl,
+ struct slim_device *sbdev,
+ struct device_node *node)
+{
+ sbdev->dev.bus = &slimbus_bus;
+ sbdev->dev.parent = ctrl->dev;
+ sbdev->dev.release = slim_dev_release;
+ sbdev->dev.driver = NULL;
+ sbdev->ctrl = ctrl;
+
+ if (node)
+ sbdev->dev.of_node = of_node_get(node);
+
+ dev_set_name(&sbdev->dev, "%x:%x:%x:%x",
+ sbdev->e_addr.manf_id,
+ sbdev->e_addr.prod_code,
+ sbdev->e_addr.dev_index,
+ sbdev->e_addr.instance);
+
+ return device_register(&sbdev->dev);
+}
+
+static struct slim_device *slim_alloc_device(struct slim_controller *ctrl,
+ struct slim_eaddr *eaddr,
+ struct device_node *node)
+{
+ struct slim_device *sbdev;
+ int ret;
+
+ sbdev = kzalloc(sizeof(*sbdev), GFP_KERNEL);
+ if (!sbdev)
+ return NULL;
+
+ sbdev->e_addr = *eaddr;
+ ret = slim_add_device(ctrl, sbdev, node);
+ if (ret) {
+ kfree(sbdev);
+ return NULL;
+ }
+
+ return sbdev;
+}
+
+static void of_register_slim_devices(struct slim_controller *ctrl)
+{
+ struct device *dev = ctrl->dev;
+ struct device_node *node;
+
+ if (!ctrl->dev->of_node)
+ return;
+
+ for_each_child_of_node(ctrl->dev->of_node, node) {
+ struct slim_device *sbdev;
+ struct slim_eaddr e_addr;
+ const char *compat = NULL;
+ u32 reg[2];
+ int ret;
+ int manf_id, prod_code;
+
+ compat = of_get_property(node, "compatible", NULL);
+ if (!compat)
+ continue;
+
+ ret = sscanf(compat, "slim%x,%x", &manf_id, &prod_code);
+ if (ret != 2) {
+ dev_err(dev, "Manf ID & Product code not found %s\n",
+ compat);
+ continue;
+ }
+
+ ret = of_property_read_u32_array(node, "reg", reg, 2);
+ if (ret) {
+ dev_err(dev, "Device and Instance id not found:%d\n",
+ ret);
+ continue;
+ }
+
+ e_addr.dev_index = reg[0];
+ e_addr.instance = reg[1];
+ e_addr.manf_id = manf_id;
+ e_addr.prod_code = prod_code;
+
+ sbdev = slim_alloc_device(ctrl, &e_addr, node);
+ if (!sbdev)
+ continue;
+ }
+}
+
+/**
+ * slim_register_controller() - Controller bring-up and registration.
+ *
+ * @ctrl: Controller to be registered.
+ *
+ * A controller is registered with the framework using this API.
+ * If devices on a controller were registered before the controller itself,
+ * this will make sure that they get probed once the controller is up.
+ */
+int slim_register_controller(struct slim_controller *ctrl)
+{
+ int id;
+
+ id = ida_simple_get(&ctrl_ida, 0, 0, GFP_KERNEL);
+ if (id < 0)
+ return id;
+
+ ctrl->id = id;
+
+ if (!ctrl->min_cg)
+ ctrl->min_cg = SLIM_MIN_CLK_GEAR;
+ if (!ctrl->max_cg)
+ ctrl->max_cg = SLIM_MAX_CLK_GEAR;
+
+ ida_init(&ctrl->laddr_ida);
+ idr_init(&ctrl->tid_idr);
+ mutex_init(&ctrl->lock);
+ mutex_init(&ctrl->sched.m_reconf);
+ init_completion(&ctrl->sched.pause_comp);
+
+ dev_dbg(ctrl->dev, "Bus [%s] registered:dev:%p\n",
+ ctrl->name, ctrl->dev);
+
+ of_register_slim_devices(ctrl);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(slim_register_controller);
+
+/* slim_remove_device: Remove the effect of slim_add_device() */
+static void slim_remove_device(struct slim_device *sbdev)
+{
+ device_unregister(&sbdev->dev);
+}
+
+static int slim_ctrl_remove_device(struct device *dev, void *null)
+{
+ slim_remove_device(to_slim_device(dev));
+ return 0;
+}
+
+/**
+ * slim_unregister_controller() - Controller tear-down.
+ *
+ * @ctrl: Controller to tear-down.
+ */
+int slim_unregister_controller(struct slim_controller *ctrl)
+{
+ /* Remove all clients */
+ device_for_each_child(ctrl->dev, NULL, slim_ctrl_remove_device);
+ /* Enter Clock Pause */
+ slim_ctrl_clk_pause(ctrl, false, 0);
+ ida_simple_remove(&ctrl_ida, ctrl->id);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(slim_unregister_controller);
+
+static void slim_device_update_status(struct slim_device *sbdev,
+ enum slim_device_status status)
+{
+ struct slim_driver *sbdrv;
+
+ if (sbdev->status == status)
+ return;
+
+ sbdev->status = status;
+ if (!sbdev->dev.driver)
+ return;
+
+ sbdrv = to_slim_driver(sbdev->dev.driver);
+ if (sbdrv->device_status)
+ sbdrv->device_status(sbdev, sbdev->status);
+}
+
+/**
+ * slim_report_absent() - Controller calls this function when a device
+ * reports absent, OR when the device cannot be communicated with
+ *
+ * @sbdev: Device that cannot be reached, or sent report absent
+ */
+void slim_report_absent(struct slim_device *sbdev)
+{
+ struct slim_controller *ctrl = sbdev->ctrl;
+
+ if (!ctrl)
+ return;
+
+ /* invalidate logical addresses */
+ mutex_lock(&ctrl->lock);
+ sbdev->is_laddr_valid = false;
+ mutex_unlock(&ctrl->lock);
+
+ ida_simple_remove(&ctrl->laddr_ida, sbdev->laddr);
+ slim_device_update_status(sbdev, SLIM_DEVICE_STATUS_DOWN);
+}
+EXPORT_SYMBOL_GPL(slim_report_absent);
+
+static bool slim_eaddr_equal(struct slim_eaddr *a, struct slim_eaddr *b)
+{
+ return (a->manf_id == b->manf_id &&
+ a->prod_code == b->prod_code &&
+ a->dev_index == b->dev_index &&
+ a->instance == b->instance);
+}
+
+static int slim_match_dev(struct device *dev, void *data)
+{
+ struct slim_eaddr *e_addr = data;
+ struct slim_device *sbdev = to_slim_device(dev);
+
+ return slim_eaddr_equal(&sbdev->e_addr, e_addr);
+}
+
+static struct slim_device *find_slim_device(struct slim_controller *ctrl,
+ struct slim_eaddr *eaddr)
+{
+ struct slim_device *sbdev;
+ struct device *dev;
+
+ dev = device_find_child(ctrl->dev, eaddr, slim_match_dev);
+ if (dev) {
+ sbdev = to_slim_device(dev);
+ return sbdev;
+ }
+
+ return NULL;
+}
+
+/**
+ * slim_get_device() - get handle to a device.
+ *
+ * @ctrl: Controller on which this device will be added/queried
+ * @e_addr: Enumeration address of the device to be queried
+ *
+ * Return: pointer to a device if it has already reported present. Creates a
+ * new device and returns a pointer to it if the device has not yet enumerated.
+ */
+struct slim_device *slim_get_device(struct slim_controller *ctrl,
+ struct slim_eaddr *e_addr)
+{
+ struct slim_device *sbdev;
+
+ sbdev = find_slim_device(ctrl, e_addr);
+ if (!sbdev) {
+ sbdev = slim_alloc_device(ctrl, e_addr, NULL);
+ if (!sbdev)
+ return ERR_PTR(-ENOMEM);
+ }
+
+ return sbdev;
+}
+EXPORT_SYMBOL_GPL(slim_get_device);
+
+static int slim_device_alloc_laddr(struct slim_device *sbdev,
+ bool report_present)
+{
+ struct slim_controller *ctrl = sbdev->ctrl;
+ u8 laddr;
+ int ret;
+
+ mutex_lock(&ctrl->lock);
+ if (ctrl->get_laddr) {
+ ret = ctrl->get_laddr(ctrl, &sbdev->e_addr, &laddr);
+ if (ret < 0)
+ goto err;
+ } else if (report_present) {
+ ret = ida_simple_get(&ctrl->laddr_ida,
+ 0, SLIM_LA_MANAGER - 1, GFP_KERNEL);
+ if (ret < 0)
+ goto err;
+
+ laddr = ret;
+ } else {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ if (ctrl->set_laddr) {
+ ret = ctrl->set_laddr(ctrl, &sbdev->e_addr, laddr);
+ if (ret) {
+ ret = -EINVAL;
+ goto err;
+ }
+ }
+
+ sbdev->laddr = laddr;
+ sbdev->is_laddr_valid = true;
+
+ slim_device_update_status(sbdev, SLIM_DEVICE_STATUS_UP);
+
+ dev_dbg(ctrl->dev, "setting slimbus l-addr:%x, ea:%x,%x,%x,%x\n",
+ laddr, sbdev->e_addr.manf_id, sbdev->e_addr.prod_code,
+ sbdev->e_addr.dev_index, sbdev->e_addr.instance);
+
+err:
+ mutex_unlock(&ctrl->lock);
+ return ret;
+}
+
+/**
+ * slim_device_report_present() - Report enumerated device.
+ *
+ * @ctrl: Controller with which device is enumerated.
+ * @e_addr: Enumeration address of the device.
+ * @laddr: Return logical address (if valid flag is false)
+ *
+ * Called by controller in response to REPORT_PRESENT. Framework will assign
+ * a logical address to this enumeration address.
+ * An error is returned if a logical address cannot be assigned; e.g. all
+ * logical addresses are already taken.
+ */
+int slim_device_report_present(struct slim_controller *ctrl,
+ struct slim_eaddr *e_addr, u8 *laddr)
+{
+ struct slim_device *sbdev;
+ int ret;
+
+ ret = pm_runtime_get_sync(ctrl->dev);
+
+ if (ctrl->sched.clk_state != SLIM_CLK_ACTIVE) {
+ dev_err(ctrl->dev, "slim ctrl not active,state:%d, ret:%d\n",
+ ctrl->sched.clk_state, ret);
+ goto slimbus_not_active;
+ }
+
+ sbdev = slim_get_device(ctrl, e_addr);
+ if (IS_ERR(sbdev)) {
+ ret = -ENODEV;
+ goto slimbus_not_active;
+ }
+
+ if (sbdev->is_laddr_valid) {
+ *laddr = sbdev->laddr;
+ ret = 0;
+ goto slimbus_not_active;
+ }
+
+ ret = slim_device_alloc_laddr(sbdev, true);
+
+slimbus_not_active:
+ pm_runtime_mark_last_busy(ctrl->dev);
+ pm_runtime_put_autosuspend(ctrl->dev);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(slim_device_report_present);
+
+/**
+ * slim_get_logical_addr() - get/allocate logical address of a SLIMbus device.
+ *
+ * @sbdev: client handle requesting the address.
+ *
+ * Return: zero if a logical address is valid or a new logical address
+ * has been assigned. error code in case of error.
+ */
+int slim_get_logical_addr(struct slim_device *sbdev)
+{
+ if (!sbdev->is_laddr_valid)
+ return slim_device_alloc_laddr(sbdev, false);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(slim_get_logical_addr);
+
+static void __exit slimbus_exit(void)
+{
+ bus_unregister(&slimbus_bus);
+}
+module_exit(slimbus_exit);
+
+static int __init slimbus_init(void)
+{
+ return bus_register(&slimbus_bus);
+}
+postcore_initcall(slimbus_init);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("SLIMbus core");
diff --git a/drivers/slimbus/messaging.c b/drivers/slimbus/messaging.c
new file mode 100644
index 000000000000..884419c37e84
--- /dev/null
+++ b/drivers/slimbus/messaging.c
@@ -0,0 +1,332 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2011-2017, The Linux Foundation
+ */
+
+#include <linux/slab.h>
+#include <linux/pm_runtime.h>
+#include "slimbus.h"
+
+/**
+ * slim_msg_response() - Deliver Message response received from a device to the
+ * framework.
+ *
+ * @ctrl: Controller handle
+ * @reply: Reply received from the device
+ * @len: Length of the reply
+ * @tid: Transaction ID received with which framework can associate reply.
+ *
+ * Called by the controller to inform the framework about a received
+ * response. This keeps the API asynchronous; the controller driver doesn't
+ * need to maintain another table, besides the framework's TID-to-buffer
+ * mapping, to match responses with requests.
+ */
+void slim_msg_response(struct slim_controller *ctrl, u8 *reply, u8 tid, u8 len)
+{
+ struct slim_msg_txn *txn;
+ struct slim_val_inf *msg;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctrl->txn_lock, flags);
+ txn = idr_find(&ctrl->tid_idr, tid);
+ if (txn == NULL) {
+ spin_unlock_irqrestore(&ctrl->txn_lock, flags);
+ return;
+ }
+
+ msg = txn->msg;
+ if (msg == NULL || msg->rbuf == NULL) {
+ dev_err(ctrl->dev, "Got response to invalid TID:%d, len:%d\n",
+ tid, len);
+ spin_unlock_irqrestore(&ctrl->txn_lock, flags);
+ return;
+ }
+
+ idr_remove(&ctrl->tid_idr, tid);
+ spin_unlock_irqrestore(&ctrl->txn_lock, flags);
+
+ memcpy(msg->rbuf, reply, len);
+ if (txn->comp)
+ complete(txn->comp);
+
+ /* Remove runtime-pm vote now that response was received for TID txn */
+ pm_runtime_mark_last_busy(ctrl->dev);
+ pm_runtime_put_autosuspend(ctrl->dev);
+}
+EXPORT_SYMBOL_GPL(slim_msg_response);
+
+/**
+ * slim_do_transfer() - Process a SLIMbus-messaging transaction
+ *
+ * @ctrl: Controller handle
+ * @txn: Transaction to be sent over SLIMbus
+ *
+ * Called by the controller to transmit messaging transactions not dealing
+ * with Interface/Value elements (e.g. transmitting a message to assign a
+ * logical address to a slave device).
+ *
+ * Return: -ETIMEDOUT: If transmission of this message timed out
+ * (e.g. due to bus lines not being clocked or driven by controller)
+ */
+int slim_do_transfer(struct slim_controller *ctrl, struct slim_msg_txn *txn)
+{
+ DECLARE_COMPLETION_ONSTACK(done);
+ bool need_tid = false, clk_pause_msg = false;
+ unsigned long flags;
+ int ret, tid, timeout;
+
+ /*
+ * do not vote for runtime-PM if the transactions are part of clock
+ * pause sequence
+ */
+ if (ctrl->sched.clk_state == SLIM_CLK_ENTERING_PAUSE &&
+ (txn->mt == SLIM_MSG_MT_CORE &&
+ txn->mc >= SLIM_MSG_MC_BEGIN_RECONFIGURATION &&
+ txn->mc <= SLIM_MSG_MC_RECONFIGURE_NOW))
+ clk_pause_msg = true;
+
+ if (!clk_pause_msg) {
+ ret = pm_runtime_get_sync(ctrl->dev);
+ if (ctrl->sched.clk_state != SLIM_CLK_ACTIVE) {
+ dev_err(ctrl->dev, "ctrl wrong state:%d, ret:%d\n",
+ ctrl->sched.clk_state, ret);
+ goto slim_xfer_err;
+ }
+ }
+
+ need_tid = slim_tid_txn(txn->mt, txn->mc);
+
+ if (need_tid) {
+ spin_lock_irqsave(&ctrl->txn_lock, flags);
+ tid = idr_alloc(&ctrl->tid_idr, txn, 0,
+ SLIM_MAX_TIDS, GFP_ATOMIC);
+ txn->tid = tid;
+
+ if (!txn->msg->comp)
+ txn->comp = &done;
+ else
+ txn->comp = txn->msg->comp;
+
+ spin_unlock_irqrestore(&ctrl->txn_lock, flags);
+
+ if (tid < 0)
+ return tid;
+ }
+
+ ret = ctrl->xfer_msg(ctrl, txn);
+
+ if (!ret && need_tid && !txn->msg->comp) {
+ unsigned long ms = txn->rl + HZ;
+
+ timeout = wait_for_completion_timeout(txn->comp,
+ msecs_to_jiffies(ms));
+ if (!timeout) {
+ ret = -ETIMEDOUT;
+ spin_lock_irqsave(&ctrl->txn_lock, flags);
+ idr_remove(&ctrl->tid_idr, tid);
+ spin_unlock_irqrestore(&ctrl->txn_lock, flags);
+ }
+ }
+
+ if (ret)
+ dev_err(ctrl->dev, "Tx:MT:0x%x, MC:0x%x, LA:0x%x failed:%d\n",
+ txn->mt, txn->mc, txn->la, ret);
+
+slim_xfer_err:
+ if (!clk_pause_msg && (!need_tid || ret == -ETIMEDOUT)) {
+ /*
+ * remove runtime-pm vote if this was TX only, or
+ * if there was error during this transaction
+ */
+ pm_runtime_mark_last_busy(ctrl->dev);
+ pm_runtime_put_autosuspend(ctrl->dev);
+ }
+ return ret;
+}
+EXPORT_SYMBOL_GPL(slim_do_transfer);
+
+static int slim_val_inf_sanity(struct slim_controller *ctrl,
+ struct slim_val_inf *msg, u8 mc)
+{
+ if (!msg || msg->num_bytes > 16 ||
+ (msg->start_offset + msg->num_bytes) > 0xC00)
+ goto reterr;
+ switch (mc) {
+ case SLIM_MSG_MC_REQUEST_VALUE:
+ case SLIM_MSG_MC_REQUEST_INFORMATION:
+ if (msg->rbuf != NULL)
+ return 0;
+ break;
+
+ case SLIM_MSG_MC_CHANGE_VALUE:
+ case SLIM_MSG_MC_CLEAR_INFORMATION:
+ if (msg->wbuf != NULL)
+ return 0;
+ break;
+
+ case SLIM_MSG_MC_REQUEST_CHANGE_VALUE:
+ case SLIM_MSG_MC_REQUEST_CLEAR_INFORMATION:
+ if (msg->rbuf != NULL && msg->wbuf != NULL)
+ return 0;
+ break;
+ }
+reterr:
+ if (msg)
+ dev_err(ctrl->dev, "Sanity check failed:msg:offset:0x%x, mc:%d\n",
+ msg->start_offset, mc);
+ return -EINVAL;
+}
+
+static u16 slim_slicesize(int code)
+{
+ static const u8 sizetocode[16] = {
+ 0, 1, 2, 3, 3, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7
+ };
+
+ code = clamp(code, 1, (int)ARRAY_SIZE(sizetocode));
+
+ return sizetocode[code - 1];
+}
+
+/**
+ * slim_xfer_msg() - Transfer a value info message on slim device
+ *
+ * @sbdev: slim device to which this msg has to be transferred
+ * @msg: value info message pointer
+ * @mc: message code of the message
+ *
+ * Called by drivers which want to transfer value or information elements.
+ *
+ * Return: -ETIMEDOUT: If transmission of this message timed out
+ */
+int slim_xfer_msg(struct slim_device *sbdev, struct slim_val_inf *msg,
+ u8 mc)
+{
+ DEFINE_SLIM_LDEST_TXN(txn_stack, mc, 6, sbdev->laddr, msg);
+ struct slim_msg_txn *txn = &txn_stack;
+ struct slim_controller *ctrl = sbdev->ctrl;
+ int ret;
+ u16 sl;
+
+ if (!ctrl)
+ return -EINVAL;
+
+ ret = slim_val_inf_sanity(ctrl, msg, mc);
+ if (ret)
+ return ret;
+
+ sl = slim_slicesize(msg->num_bytes);
+
+ dev_dbg(ctrl->dev, "SB xfer msg:os:%x, len:%d, MC:%x, sl:%x\n",
+ msg->start_offset, msg->num_bytes, mc, sl);
+
+ txn->ec = ((sl | (1 << 3)) | ((msg->start_offset & 0xFFF) << 4));
+
+ switch (mc) {
+ case SLIM_MSG_MC_REQUEST_CHANGE_VALUE:
+ case SLIM_MSG_MC_CHANGE_VALUE:
+ case SLIM_MSG_MC_REQUEST_CLEAR_INFORMATION:
+ case SLIM_MSG_MC_CLEAR_INFORMATION:
+ txn->rl += msg->num_bytes;
+ break;
+ default:
+ break;
+ }
+
+ if (slim_tid_txn(txn->mt, txn->mc))
+ txn->rl++;
+
+ return slim_do_transfer(ctrl, txn);
+}
+EXPORT_SYMBOL_GPL(slim_xfer_msg);
+
+static void slim_fill_msg(struct slim_val_inf *msg, u32 addr,
+ size_t count, u8 *rbuf, u8 *wbuf)
+{
+ msg->start_offset = addr;
+ msg->num_bytes = count;
+ msg->rbuf = rbuf;
+ msg->wbuf = wbuf;
+}
+
+/**
+ * slim_read() - Read SLIMbus value element
+ *
+ * @sdev: client handle.
+ * @addr: address of value element to read.
+ * @count: number of bytes to read. Maximum bytes allowed are 16.
+ * @val: will return what the value element value was
+ *
+ * Return: -EINVAL for Invalid parameters, -ETIMEDOUT If transmission of
+ * this message timed out (e.g. due to bus lines not being clocked
+ * or driven by controller)
+ */
+int slim_read(struct slim_device *sdev, u32 addr, size_t count, u8 *val)
+{
+ struct slim_val_inf msg;
+
+ slim_fill_msg(&msg, addr, count, val, NULL);
+
+ return slim_xfer_msg(sdev, &msg, SLIM_MSG_MC_REQUEST_VALUE);
+}
+EXPORT_SYMBOL_GPL(slim_read);
+
+/**
+ * slim_readb() - Read byte from SLIMbus value element
+ *
+ * @sdev: client handle.
+ * @addr: address in the value element to read.
+ *
+ * Return: byte value of value element.
+ */
+int slim_readb(struct slim_device *sdev, u32 addr)
+{
+ int ret;
+ u8 buf;
+
+ ret = slim_read(sdev, addr, 1, &buf);
+ if (ret < 0)
+ return ret;
+ else
+ return buf;
+}
+EXPORT_SYMBOL_GPL(slim_readb);
+
+/**
+ * slim_write() - Write SLIMbus value element
+ *
+ * @sdev: client handle.
+ * @addr: address in the value element to write.
+ * @count: number of bytes to write. Maximum bytes allowed are 16.
+ * @val: value to write to value element
+ *
+ * Return: -EINVAL for Invalid parameters, -ETIMEDOUT If transmission of
+ * this message timed out (e.g. due to bus lines not being clocked
+ * or driven by controller)
+ */
+int slim_write(struct slim_device *sdev, u32 addr, size_t count, u8 *val)
+{
+ struct slim_val_inf msg;
+
+ slim_fill_msg(&msg, addr, count, val, NULL);
+
+ return slim_xfer_msg(sdev, &msg, SLIM_MSG_MC_CHANGE_VALUE);
+}
+EXPORT_SYMBOL_GPL(slim_write);
+
+/**
+ * slim_writeb() - Write byte to SLIMbus value element
+ *
+ * @sdev: client handle.
+ * @addr: address of value element to write.
+ * @value: value to write to value element
+ *
+ * Return: -EINVAL for Invalid parameters, -ETIMEDOUT If transmission of
+ * this message timed out (e.g. due to bus lines not being clocked
+ * or driven by controller)
+ *
+ */
+int slim_writeb(struct slim_device *sdev, u32 addr, u8 value)
+{
+ return slim_write(sdev, addr, 1, &value);
+}
+EXPORT_SYMBOL_GPL(slim_writeb);
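Putting the value-element helpers together, a client might do something like
this (the offsets and values below are invented purely for illustration):

	#include <linux/slimbus.h>

	static int foo_read_chip_id(struct slim_device *sdev)
	{
		u8 id[2];
		int ret;

		/* read a 2-byte value element at a made-up offset */
		ret = slim_read(sdev, 0x100, sizeof(id), id);
		if (ret)
			return ret;

		/* write a single byte back at the following offset */
		return slim_writeb(sdev, 0x102, id[0] | 0x80);
	}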
diff --git a/drivers/slimbus/qcom-ctrl.c b/drivers/slimbus/qcom-ctrl.c
new file mode 100644
index 000000000000..ffb46f915334
--- /dev/null
+++ b/drivers/slimbus/qcom-ctrl.c
@@ -0,0 +1,747 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2011-2017, The Linux Foundation
+ */
+
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/of.h>
+#include <linux/pm_runtime.h>
+#include "slimbus.h"
+
+/* Manager registers */
+#define MGR_CFG 0x200
+#define MGR_STATUS 0x204
+#define MGR_INT_EN 0x210
+#define MGR_INT_STAT 0x214
+#define MGR_INT_CLR 0x218
+#define MGR_TX_MSG 0x230
+#define MGR_RX_MSG 0x270
+#define MGR_IE_STAT 0x2F0
+#define MGR_VE_STAT 0x300
+#define MGR_CFG_ENABLE 1
+
+/* Framer registers */
+#define FRM_CFG 0x400
+#define FRM_STAT 0x404
+#define FRM_INT_EN 0x410
+#define FRM_INT_STAT 0x414
+#define FRM_INT_CLR 0x418
+#define FRM_WAKEUP 0x41C
+#define FRM_CLKCTL_DONE 0x420
+#define FRM_IE_STAT 0x430
+#define FRM_VE_STAT 0x440
+
+/* Interface registers */
+#define INTF_CFG 0x600
+#define INTF_STAT 0x604
+#define INTF_INT_EN 0x610
+#define INTF_INT_STAT 0x614
+#define INTF_INT_CLR 0x618
+#define INTF_IE_STAT 0x630
+#define INTF_VE_STAT 0x640
+
+/* Interrupt status bits */
+#define MGR_INT_TX_NACKED_2 BIT(25)
+#define MGR_INT_MSG_BUF_CONTE BIT(26)
+#define MGR_INT_RX_MSG_RCVD BIT(30)
+#define MGR_INT_TX_MSG_SENT BIT(31)
+
+/* Framer config register settings */
+#define FRM_ACTIVE 1
+#define CLK_GEAR 7
+#define ROOT_FREQ 11
+#define REF_CLK_GEAR 15
+#define INTR_WAKE 19
+
+#define SLIM_MSG_ASM_FIRST_WORD(l, mt, mc, dt, ad) \
+ ((l) | ((mt) << 5) | ((mc) << 8) | ((dt) << 15) | ((ad) << 16))
+
+#define SLIM_ROOT_FREQ 24576000
+#define QCOM_SLIM_AUTOSUSPEND 1000
+
+/* MAX message size over control channel */
+#define SLIM_MSGQ_BUF_LEN 40
+#define QCOM_TX_MSGS 2
+#define QCOM_RX_MSGS 8
+#define QCOM_BUF_ALLOC_RETRIES 10
+
+#define CFG_PORT(r, v) ((v) ? CFG_PORT_V2(r) : CFG_PORT_V1(r))
+
+/* V2 Component registers */
+#define CFG_PORT_V2(r) ((r ## _V2))
+#define COMP_CFG_V2 4
+#define COMP_TRUST_CFG_V2 0x3000
+
+/* V1 Component registers */
+#define CFG_PORT_V1(r) ((r ## _V1))
+#define COMP_CFG_V1 0
+#define COMP_TRUST_CFG_V1 0x14
+
+/* Resource group info for manager, and non-ported generic device-components */
+#define EE_MGR_RSC_GRP (1 << 10)
+#define EE_NGD_2 (2 << 6)
+#define EE_NGD_1 0
+
+struct slim_ctrl_buf {
+ void *base;
+ spinlock_t lock;
+ int head;
+ int tail;
+ int sl_sz;
+ int n;
+};
+
+struct qcom_slim_ctrl {
+ struct slim_controller ctrl;
+ struct slim_framer framer;
+ struct device *dev;
+ void __iomem *base;
+ void __iomem *slew_reg;
+
+ struct slim_ctrl_buf rx;
+ struct slim_ctrl_buf tx;
+
+ struct completion **wr_comp;
+ int irq;
+ struct workqueue_struct *rxwq;
+ struct work_struct wd;
+ struct clk *rclk;
+ struct clk *hclk;
+};
+
+static void qcom_slim_queue_tx(struct qcom_slim_ctrl *ctrl, void *buf,
+ u8 len, u32 tx_reg)
+{
+ int count = (len + 3) >> 2;
+
+ __iowrite32_copy(ctrl->base + tx_reg, buf, count);
+
+ /* Ensure ordering of subsequent register writes */
+ mb();
+}
+
+static void *slim_alloc_rxbuf(struct qcom_slim_ctrl *ctrl)
+{
+ unsigned long flags;
+ int idx;
+
+ spin_lock_irqsave(&ctrl->rx.lock, flags);
+ if ((ctrl->rx.tail + 1) % ctrl->rx.n == ctrl->rx.head) {
+ spin_unlock_irqrestore(&ctrl->rx.lock, flags);
+ dev_err(ctrl->dev, "RX queue full!\n");
+ return NULL;
+ }
+ idx = ctrl->rx.tail;
+ ctrl->rx.tail = (ctrl->rx.tail + 1) % ctrl->rx.n;
+ spin_unlock_irqrestore(&ctrl->rx.lock, flags);
+
+ return ctrl->rx.base + (idx * ctrl->rx.sl_sz);
+}
+
+static void slim_ack_txn(struct qcom_slim_ctrl *ctrl, int err)
+{
+ struct completion *comp;
+ unsigned long flags;
+ int idx;
+
+ spin_lock_irqsave(&ctrl->tx.lock, flags);
+ idx = ctrl->tx.head;
+ ctrl->tx.head = (ctrl->tx.head + 1) % ctrl->tx.n;
+ spin_unlock_irqrestore(&ctrl->tx.lock, flags);
+
+ comp = ctrl->wr_comp[idx];
+ ctrl->wr_comp[idx] = NULL;
+
+ complete(comp);
+}
+
+static irqreturn_t qcom_slim_handle_tx_irq(struct qcom_slim_ctrl *ctrl,
+ u32 stat)
+{
+ int err = 0;
+
+ if (stat & MGR_INT_TX_MSG_SENT)
+ writel_relaxed(MGR_INT_TX_MSG_SENT,
+ ctrl->base + MGR_INT_CLR);
+
+ if (stat & MGR_INT_TX_NACKED_2) {
+ u32 mgr_stat = readl_relaxed(ctrl->base + MGR_STATUS);
+ u32 mgr_ie_stat = readl_relaxed(ctrl->base + MGR_IE_STAT);
+ u32 frm_stat = readl_relaxed(ctrl->base + FRM_STAT);
+ u32 frm_cfg = readl_relaxed(ctrl->base + FRM_CFG);
+ u32 frm_intr_stat = readl_relaxed(ctrl->base + FRM_INT_STAT);
+ u32 frm_ie_stat = readl_relaxed(ctrl->base + FRM_IE_STAT);
+ u32 intf_stat = readl_relaxed(ctrl->base + INTF_STAT);
+ u32 intf_intr_stat = readl_relaxed(ctrl->base + INTF_INT_STAT);
+ u32 intf_ie_stat = readl_relaxed(ctrl->base + INTF_IE_STAT);
+
+ writel_relaxed(MGR_INT_TX_NACKED_2, ctrl->base + MGR_INT_CLR);
+
+ dev_err(ctrl->dev, "TX Nack MGR:int:0x%x, stat:0x%x\n",
+ stat, mgr_stat);
+ dev_err(ctrl->dev, "TX Nack MGR:ie:0x%x\n", mgr_ie_stat);
+ dev_err(ctrl->dev, "TX Nack FRM:int:0x%x, stat:0x%x\n",
+ frm_intr_stat, frm_stat);
+ dev_err(ctrl->dev, "TX Nack FRM:cfg:0x%x, ie:0x%x\n",
+ frm_cfg, frm_ie_stat);
+ dev_err(ctrl->dev, "TX Nack INTF:intr:0x%x, stat:0x%x\n",
+ intf_intr_stat, intf_stat);
+ dev_err(ctrl->dev, "TX Nack INTF:ie:0x%x\n",
+ intf_ie_stat);
+ err = -ENOTCONN;
+ }
+
+ slim_ack_txn(ctrl, err);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t qcom_slim_handle_rx_irq(struct qcom_slim_ctrl *ctrl,
+ u32 stat)
+{
+ u32 *rx_buf, pkt[10];
+ bool q_rx = false;
+ u8 mc, mt, len;
+
+ pkt[0] = readl_relaxed(ctrl->base + MGR_RX_MSG);
+ mt = SLIM_HEADER_GET_MT(pkt[0]);
+ len = SLIM_HEADER_GET_RL(pkt[0]);
+ mc = SLIM_HEADER_GET_MC(pkt[0]>>8);
+
+ /*
+ * this message cannot be handled by ISR, so
+ * let work-queue handle it
+ */
+ if (mt == SLIM_MSG_MT_CORE && mc == SLIM_MSG_MC_REPORT_PRESENT) {
+ rx_buf = (u32 *)slim_alloc_rxbuf(ctrl);
+ if (!rx_buf) {
+ dev_err(ctrl->dev, "dropping RX:0x%x due to RX full\n",
+ pkt[0]);
+ goto rx_ret_irq;
+ }
+ rx_buf[0] = pkt[0];
+
+ } else {
+ rx_buf = pkt;
+ }
+
+ __ioread32_copy(rx_buf + 1, ctrl->base + MGR_RX_MSG + 4,
+ DIV_ROUND_UP(len, 4));
+
+ switch (mc) {
+ case SLIM_MSG_MC_REPORT_PRESENT:
+ q_rx = true;
+ break;
+ case SLIM_MSG_MC_REPLY_INFORMATION:
+ case SLIM_MSG_MC_REPLY_VALUE:
+ slim_msg_response(&ctrl->ctrl, (u8 *)(rx_buf + 1),
+ (u8)(*rx_buf >> 24), (len - 4));
+ break;
+ default:
+ dev_err(ctrl->dev, "unsupported MC,%x MT:%x\n",
+ mc, mt);
+ break;
+ }
+rx_ret_irq:
+ writel(MGR_INT_RX_MSG_RCVD, ctrl->base +
+ MGR_INT_CLR);
+ if (q_rx)
+ queue_work(ctrl->rxwq, &ctrl->wd);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t qcom_slim_interrupt(int irq, void *d)
+{
+ struct qcom_slim_ctrl *ctrl = d;
+ u32 stat = readl_relaxed(ctrl->base + MGR_INT_STAT);
+ int ret = IRQ_NONE;
+
+ if (stat & MGR_INT_TX_MSG_SENT || stat & MGR_INT_TX_NACKED_2)
+ ret = qcom_slim_handle_tx_irq(ctrl, stat);
+
+ if (stat & MGR_INT_RX_MSG_RCVD)
+ ret = qcom_slim_handle_rx_irq(ctrl, stat);
+
+ return ret;
+}
+
+static int qcom_clk_pause_wakeup(struct slim_controller *sctrl)
+{
+ struct qcom_slim_ctrl *ctrl = dev_get_drvdata(sctrl->dev);
+
+ clk_prepare_enable(ctrl->hclk);
+ clk_prepare_enable(ctrl->rclk);
+ enable_irq(ctrl->irq);
+
+ writel_relaxed(1, ctrl->base + FRM_WAKEUP);
+ /* Make sure framer wakeup write goes through before ISR fires */
+ mb();
+ /*
+ * HW workaround: currently, slaves report lost-sync messages after
+ * SLIMbus comes out of clock pause. Transactions with a slave fail
+ * before the slave sends that report, so give it some time to arrive.
+ * SLIMbus wakes up in clock gear 10 at 24.576 MHz. With each superframe
+ * being 250 us, we wait for 5-10 superframes here to ensure we get
+ * the message.
+ */
+ usleep_range(1250, 2500);
+ return 0;
+}
+
+static void *slim_alloc_txbuf(struct qcom_slim_ctrl *ctrl,
+ struct slim_msg_txn *txn,
+ struct completion *done)
+{
+ unsigned long flags;
+ int idx;
+
+ spin_lock_irqsave(&ctrl->tx.lock, flags);
+ if (((ctrl->tx.head + 1) % ctrl->tx.n) == ctrl->tx.tail) {
+ spin_unlock_irqrestore(&ctrl->tx.lock, flags);
+ dev_err(ctrl->dev, "controller TX buf unavailable\n");
+ return NULL;
+ }
+ idx = ctrl->tx.tail;
+ ctrl->wr_comp[idx] = done;
+ ctrl->tx.tail = (ctrl->tx.tail + 1) % ctrl->tx.n;
+
+ spin_unlock_irqrestore(&ctrl->tx.lock, flags);
+
+ return ctrl->tx.base + (idx * ctrl->tx.sl_sz);
+}
+
+static int qcom_xfer_msg(struct slim_controller *sctrl,
+ struct slim_msg_txn *txn)
+{
+ struct qcom_slim_ctrl *ctrl = dev_get_drvdata(sctrl->dev);
+ DECLARE_COMPLETION_ONSTACK(done);
+ void *pbuf = slim_alloc_txbuf(ctrl, txn, &done);
+ unsigned long ms = txn->rl + HZ;
+ u8 *puc;
+ int ret = 0, timeout, retries = QCOM_BUF_ALLOC_RETRIES;
+ u8 la = txn->la;
+ u32 *head;
+ /* HW expects length field to be excluded */
+ txn->rl--;
+
+ /* spin till buffer is made available */
+ if (!pbuf) {
+ while (retries--) {
+ usleep_range(10000, 15000);
+ pbuf = slim_alloc_txbuf(ctrl, txn, &done);
+ if (pbuf)
+ break;
+ }
+ }
+
+ if (!pbuf)
+ return -ENOMEM;
+
+ puc = (u8 *)pbuf;
+ head = (u32 *)pbuf;
+
+ if (txn->dt == SLIM_MSG_DEST_LOGICALADDR) {
+ *head = SLIM_MSG_ASM_FIRST_WORD(txn->rl, txn->mt,
+ txn->mc, 0, la);
+ puc += 3;
+ } else {
+ *head = SLIM_MSG_ASM_FIRST_WORD(txn->rl, txn->mt,
+ txn->mc, 1, la);
+ puc += 2;
+ }
+
+ if (slim_tid_txn(txn->mt, txn->mc))
+ *(puc++) = txn->tid;
+
+ if (slim_ec_txn(txn->mt, txn->mc)) {
+ *(puc++) = (txn->ec & 0xFF);
+ *(puc++) = (txn->ec >> 8) & 0xFF;
+ }
+
+ if (txn->msg && txn->msg->wbuf)
+ memcpy(puc, txn->msg->wbuf, txn->msg->num_bytes);
+
+ qcom_slim_queue_tx(ctrl, head, txn->rl, MGR_TX_MSG);
+ timeout = wait_for_completion_timeout(&done, msecs_to_jiffies(ms));
+
+ if (!timeout) {
+ dev_err(ctrl->dev, "TX timed out:MC:0x%x,mt:0x%x", txn->mc,
+ txn->mt);
+ ret = -ETIMEDOUT;
+ }
+
+ return ret;
+}
+
+static int qcom_set_laddr(struct slim_controller *sctrl,
+ struct slim_eaddr *ead, u8 laddr)
+{
+ struct qcom_slim_ctrl *ctrl = dev_get_drvdata(sctrl->dev);
+ struct {
+ __be16 manf_id;
+ __be16 prod_code;
+ u8 dev_index;
+ u8 instance;
+ u8 laddr;
+ } __packed p;
+ struct slim_val_inf msg = {0};
+ DEFINE_SLIM_EDEST_TXN(txn, SLIM_MSG_MC_ASSIGN_LOGICAL_ADDRESS,
+ 10, laddr, &msg);
+ int ret;
+
+ p.manf_id = cpu_to_be16(ead->manf_id);
+ p.prod_code = cpu_to_be16(ead->prod_code);
+ p.dev_index = ead->dev_index;
+ p.instance = ead->instance;
+ p.laddr = laddr;
+
+ msg.wbuf = (void *)&p;
+ msg.num_bytes = 7;
+ ret = slim_do_transfer(&ctrl->ctrl, &txn);
+
+ if (ret)
+ dev_err(ctrl->dev, "set LA:0x%x failed:ret:%d\n",
+ laddr, ret);
+ return ret;
+}
+
+static int slim_get_current_rxbuf(struct qcom_slim_ctrl *ctrl, void *buf)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctrl->rx.lock, flags);
+ if (ctrl->rx.tail == ctrl->rx.head) {
+ spin_unlock_irqrestore(&ctrl->rx.lock, flags);
+ return -ENODATA;
+ }
+ memcpy(buf, ctrl->rx.base + (ctrl->rx.head * ctrl->rx.sl_sz),
+ ctrl->rx.sl_sz);
+
+ ctrl->rx.head = (ctrl->rx.head + 1) % ctrl->rx.n;
+ spin_unlock_irqrestore(&ctrl->rx.lock, flags);
+
+ return 0;
+}
+
+static void qcom_slim_rxwq(struct work_struct *work)
+{
+ u8 buf[SLIM_MSGQ_BUF_LEN];
+ u8 mc, mt, len;
+ int ret;
+ struct qcom_slim_ctrl *ctrl = container_of(work, struct qcom_slim_ctrl,
+ wd);
+
+ while ((slim_get_current_rxbuf(ctrl, buf)) != -ENODATA) {
+ len = SLIM_HEADER_GET_RL(buf[0]);
+ mt = SLIM_HEADER_GET_MT(buf[0]);
+ mc = SLIM_HEADER_GET_MC(buf[1]);
+ if (mt == SLIM_MSG_MT_CORE &&
+ mc == SLIM_MSG_MC_REPORT_PRESENT) {
+ struct slim_eaddr ea;
+ u8 laddr;
+
+ ea.manf_id = be16_to_cpup((__be16 *)&buf[2]);
+ ea.prod_code = be16_to_cpup((__be16 *)&buf[4]);
+ ea.dev_index = buf[6];
+ ea.instance = buf[7];
+
+ ret = slim_device_report_present(&ctrl->ctrl, &ea,
+ &laddr);
+ if (ret < 0)
+ dev_err(ctrl->dev, "assign laddr failed:%d\n",
+ ret);
+ } else {
+ dev_err(ctrl->dev, "unexpected message:mc:%x, mt:%x\n",
+ mc, mt);
+ }
+ }
+}
+
+static void qcom_slim_prg_slew(struct platform_device *pdev,
+ struct qcom_slim_ctrl *ctrl)
+{
+ struct resource *slew_mem;
+
+ if (!ctrl->slew_reg) {
+ /* SLEW RATE register for this SLIMbus */
+ slew_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "slew");
+ if (!slew_mem)
+ return;
+ ctrl->slew_reg = devm_ioremap(&pdev->dev, slew_mem->start,
+ resource_size(slew_mem));
+ if (!ctrl->slew_reg)
+ return;
+ }
+
+ writel_relaxed(1, ctrl->slew_reg);
+ /* Make sure SLIMbus-slew rate enabling goes through */
+ wmb();
+}
+
+static int qcom_slim_probe(struct platform_device *pdev)
+{
+ struct qcom_slim_ctrl *ctrl;
+ struct slim_controller *sctrl;
+ struct resource *slim_mem;
+ int ret, ver;
+
+ ctrl = devm_kzalloc(&pdev->dev, sizeof(*ctrl), GFP_KERNEL);
+ if (!ctrl)
+ return -ENOMEM;
+
+ ctrl->hclk = devm_clk_get(&pdev->dev, "iface");
+ if (IS_ERR(ctrl->hclk))
+ return PTR_ERR(ctrl->hclk);
+
+ ctrl->rclk = devm_clk_get(&pdev->dev, "core");
+ if (IS_ERR(ctrl->rclk))
+ return PTR_ERR(ctrl->rclk);
+
+ ret = clk_set_rate(ctrl->rclk, SLIM_ROOT_FREQ);
+ if (ret) {
+ dev_err(&pdev->dev, "ref-clock set-rate failed:%d\n", ret);
+ return ret;
+ }
+
+ ctrl->irq = platform_get_irq(pdev, 0);
+ if (ctrl->irq < 0) {
+ dev_err(&pdev->dev, "no slimbus IRQ: %d\n", ctrl->irq);
+ return ctrl->irq;
+ }
+
+ sctrl = &ctrl->ctrl;
+ sctrl->dev = &pdev->dev;
+ ctrl->dev = &pdev->dev;
+ platform_set_drvdata(pdev, ctrl);
+ dev_set_drvdata(ctrl->dev, ctrl);
+
+ slim_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ctrl");
+ ctrl->base = devm_ioremap_resource(ctrl->dev, slim_mem);
+ if (IS_ERR(ctrl->base)) {
+ dev_err(&pdev->dev, "IOremap failed\n");
+ return PTR_ERR(ctrl->base);
+ }
+
+ sctrl->set_laddr = qcom_set_laddr;
+ sctrl->xfer_msg = qcom_xfer_msg;
+ sctrl->wakeup = qcom_clk_pause_wakeup;
+ ctrl->tx.n = QCOM_TX_MSGS;
+ ctrl->tx.sl_sz = SLIM_MSGQ_BUF_LEN;
+ ctrl->rx.n = QCOM_RX_MSGS;
+ ctrl->rx.sl_sz = SLIM_MSGQ_BUF_LEN;
+ ctrl->wr_comp = devm_kcalloc(&pdev->dev, QCOM_TX_MSGS,
+ sizeof(struct completion *), GFP_KERNEL);
+ if (!ctrl->wr_comp)
+ return -ENOMEM;
+
+ spin_lock_init(&ctrl->rx.lock);
+ spin_lock_init(&ctrl->tx.lock);
+ INIT_WORK(&ctrl->wd, qcom_slim_rxwq);
+ ctrl->rxwq = create_singlethread_workqueue("qcom_slim_rx");
+ if (!ctrl->rxwq) {
+ dev_err(ctrl->dev, "Failed to start Rx WQ\n");
+ return -ENOMEM;
+ }
+
+ ctrl->framer.rootfreq = SLIM_ROOT_FREQ / 8;
+ ctrl->framer.superfreq =
+ ctrl->framer.rootfreq / SLIM_CL_PER_SUPERFRAME_DIV8;
+ sctrl->a_framer = &ctrl->framer;
+ sctrl->clkgear = SLIM_MAX_CLK_GEAR;
+
+ qcom_slim_prg_slew(pdev, ctrl);
+
+ ret = devm_request_irq(&pdev->dev, ctrl->irq, qcom_slim_interrupt,
+ IRQF_TRIGGER_HIGH, "qcom_slim_irq", ctrl);
+ if (ret) {
+ dev_err(&pdev->dev, "request IRQ failed\n");
+ goto err_request_irq_failed;
+ }
+
+ ret = clk_prepare_enable(ctrl->hclk);
+ if (ret)
+ goto err_hclk_enable_failed;
+
+ ret = clk_prepare_enable(ctrl->rclk);
+ if (ret)
+ goto err_rclk_enable_failed;
+
+ ctrl->tx.base = devm_kcalloc(&pdev->dev, ctrl->tx.n, ctrl->tx.sl_sz,
+ GFP_KERNEL);
+ if (!ctrl->tx.base) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ctrl->rx.base = devm_kcalloc(&pdev->dev, ctrl->rx.n, ctrl->rx.sl_sz,
+ GFP_KERNEL);
+ if (!ctrl->rx.base) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ /* Register with framework before enabling frame, clock */
+ ret = slim_register_controller(&ctrl->ctrl);
+ if (ret) {
+ dev_err(ctrl->dev, "error adding controller\n");
+ goto err;
+ }
+
+ ver = readl_relaxed(ctrl->base);
+ /* Version info in 16 MSbits */
+ ver >>= 16;
+ /* Component register initialization */
+ writel(1, ctrl->base + CFG_PORT(COMP_CFG, ver));
+ writel((EE_MGR_RSC_GRP | EE_NGD_2 | EE_NGD_1),
+ ctrl->base + CFG_PORT(COMP_TRUST_CFG, ver));
+
+ writel((MGR_INT_TX_NACKED_2 |
+ MGR_INT_MSG_BUF_CONTE | MGR_INT_RX_MSG_RCVD |
+ MGR_INT_TX_MSG_SENT), ctrl->base + MGR_INT_EN);
+ writel(1, ctrl->base + MGR_CFG);
+ /* Framer register initialization */
+ writel((1 << INTR_WAKE) | (0xA << REF_CLK_GEAR) |
+ (0xA << CLK_GEAR) | (1 << ROOT_FREQ) | (1 << FRM_ACTIVE) | 1,
+ ctrl->base + FRM_CFG);
+ writel(MGR_CFG_ENABLE, ctrl->base + MGR_CFG);
+ writel(1, ctrl->base + INTF_CFG);
+ writel(1, ctrl->base + CFG_PORT(COMP_CFG, ver));
+
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_set_autosuspend_delay(&pdev->dev, QCOM_SLIM_AUTOSUSPEND);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_mark_last_busy(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
+ dev_dbg(ctrl->dev, "QCOM SB controller is up:ver:0x%x!\n", ver);
+ return 0;
+
+err:
+ clk_disable_unprepare(ctrl->rclk);
+err_rclk_enable_failed:
+ clk_disable_unprepare(ctrl->hclk);
+err_hclk_enable_failed:
+err_request_irq_failed:
+ destroy_workqueue(ctrl->rxwq);
+ return ret;
+}
+
+static int qcom_slim_remove(struct platform_device *pdev)
+{
+ struct qcom_slim_ctrl *ctrl = platform_get_drvdata(pdev);
+
+ pm_runtime_disable(&pdev->dev);
+ slim_unregister_controller(&ctrl->ctrl);
+ destroy_workqueue(ctrl->rxwq);
+ return 0;
+}
+
+/*
+ * If runtime PM is not enabled, these two functions serve as helpers
+ * that are called from system suspend/resume.
+ */
+#ifdef CONFIG_PM
+static int qcom_slim_runtime_suspend(struct device *device)
+{
+ struct platform_device *pdev = to_platform_device(device);
+ struct qcom_slim_ctrl *ctrl = platform_get_drvdata(pdev);
+ int ret;
+
+ dev_dbg(device, "pm_runtime: suspending...\n");
+ ret = slim_ctrl_clk_pause(&ctrl->ctrl, false, SLIM_CLK_UNSPECIFIED);
+ if (ret) {
+ dev_err(device, "clk pause not entered:%d\n", ret);
+ } else {
+ disable_irq(ctrl->irq);
+ clk_disable_unprepare(ctrl->hclk);
+ clk_disable_unprepare(ctrl->rclk);
+ }
+ return ret;
+}
+
+static int qcom_slim_runtime_resume(struct device *device)
+{
+ struct platform_device *pdev = to_platform_device(device);
+ struct qcom_slim_ctrl *ctrl = platform_get_drvdata(pdev);
+ int ret = 0;
+
+ dev_dbg(device, "pm_runtime: resuming...\n");
+ ret = slim_ctrl_clk_pause(&ctrl->ctrl, true, 0);
+ if (ret)
+ dev_err(device, "clk pause not exited:%d\n", ret);
+ return ret;
+}
+#endif
+
+#ifdef CONFIG_PM_SLEEP
+static int qcom_slim_suspend(struct device *dev)
+{
+ int ret = 0;
+
+ if (!pm_runtime_enabled(dev) || !pm_runtime_suspended(dev)) {
+ dev_dbg(dev, "system suspend");
+ ret = qcom_slim_runtime_suspend(dev);
+ }
+
+ return ret;
+}
+
+static int qcom_slim_resume(struct device *dev)
+{
+ if (!pm_runtime_enabled(dev) || !pm_runtime_suspended(dev)) {
+ int ret;
+
+ dev_dbg(dev, "system resume");
+ ret = qcom_slim_runtime_resume(dev);
+ if (!ret) {
+ pm_runtime_mark_last_busy(dev);
+ pm_request_autosuspend(dev);
+ }
+ return ret;
+
+ }
+ return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static const struct dev_pm_ops qcom_slim_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(qcom_slim_suspend, qcom_slim_resume)
+ SET_RUNTIME_PM_OPS(
+ qcom_slim_runtime_suspend,
+ qcom_slim_runtime_resume,
+ NULL
+ )
+};
+
+static const struct of_device_id qcom_slim_dt_match[] = {
+ { .compatible = "qcom,slim", },
+ { .compatible = "qcom,apq8064-slim", },
+ {}
+};
+
+static struct platform_driver qcom_slim_driver = {
+ .probe = qcom_slim_probe,
+ .remove = qcom_slim_remove,
+ .driver = {
+ .name = "qcom_slim_ctrl",
+ .of_match_table = qcom_slim_dt_match,
+ .pm = &qcom_slim_dev_pm_ops,
+ },
+};
+module_platform_driver(qcom_slim_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Qualcomm SLIMbus Controller");
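The Qualcomm driver above is a full-featured consumer of the controller API;
stripped to its essentials, a controller only has to populate a device pointer
and the xfer_msg()/set_laddr() callbacks before registering. A compressed,
hypothetical sketch (foo_* names invented, hardware access elided):

	#include <linux/module.h>
	#include <linux/platform_device.h>
	#include "slimbus.h"

	struct foo_ctrl {
		struct slim_controller ctrl;
	};

	/* transmit one transaction on the (imaginary) hardware */
	static int foo_xfer_msg(struct slim_controller *sctrl,
				struct slim_msg_txn *txn)
	{
		/* ... assemble the header from txn and queue it ... */
		return 0;
	}

	static int foo_set_laddr(struct slim_controller *sctrl,
				 struct slim_eaddr *ead, u8 laddr)
	{
		/* ... send ASSIGN_LOGICAL_ADDRESS on the bus ... */
		return 0;
	}

	static int foo_probe(struct platform_device *pdev)
	{
		struct foo_ctrl *foo;

		foo = devm_kzalloc(&pdev->dev, sizeof(*foo), GFP_KERNEL);
		if (!foo)
			return -ENOMEM;

		foo->ctrl.dev = &pdev->dev;
		foo->ctrl.xfer_msg = foo_xfer_msg;
		foo->ctrl.set_laddr = foo_set_laddr;

		return slim_register_controller(&foo->ctrl);
	}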
diff --git a/drivers/slimbus/sched.c b/drivers/slimbus/sched.c
new file mode 100644
index 000000000000..af84997d2742
--- /dev/null
+++ b/drivers/slimbus/sched.c
@@ -0,0 +1,121 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2011-2017, The Linux Foundation
+ */
+
+#include <linux/errno.h>
+#include "slimbus.h"
+
+/**
+ * slim_ctrl_clk_pause() - Called by slimbus controller to enter/exit
+ * 'clock pause'
+ * @ctrl: controller requesting bus to be paused or woken up
+ * @wakeup: Wakeup this controller from clock pause.
+ * @restart: Restart time value per spec used for clock pause. This value
+ * isn't used when controller is to be woken up.
+ *
+ * The SLIMbus specification requires this sequence to turn off clocks for the bus.
+ * The sequence involves sending 3 broadcast messages (reconfiguration
+ * sequence) to inform all devices on the bus.
+ * To exit clock-pause, controller typically wakes up active framer device.
+ * This API executes clock pause reconfiguration sequence if wakeup is false.
+ * If wakeup is true, controller's wakeup is called.
+ * For entering clock pause, -EBUSY is returned if a message transaction is pending.
+ */
+int slim_ctrl_clk_pause(struct slim_controller *ctrl, bool wakeup, u8 restart)
+{
+ int i, ret = 0;
+ unsigned long flags;
+ struct slim_sched *sched = &ctrl->sched;
+ struct slim_val_inf msg = {0, 0, NULL, NULL};
+
+ DEFINE_SLIM_BCAST_TXN(txn, SLIM_MSG_MC_BEGIN_RECONFIGURATION,
+ 3, SLIM_LA_MANAGER, &msg);
+
+ if (!wakeup && restart > SLIM_CLK_UNSPECIFIED)
+ return -EINVAL;
+
+ mutex_lock(&sched->m_reconf);
+ if (wakeup) {
+ if (sched->clk_state == SLIM_CLK_ACTIVE) {
+ mutex_unlock(&sched->m_reconf);
+ return 0;
+ }
+
+ /*
+ * Fine-tune calculation based on clock gear,
+ * message-bandwidth after bandwidth management
+ */
+ ret = wait_for_completion_timeout(&sched->pause_comp,
+ msecs_to_jiffies(100));
+ if (!ret) {
+ mutex_unlock(&sched->m_reconf);
+ pr_err("Previous clock pause did not finish\n");
+ return -ETIMEDOUT;
+ }
+ ret = 0;
+
+ /*
+ * Slimbus framework will call controller wakeup
+ * Controller should make sure that it sets active framer
+ * out of clock pause
+ */
+ if (sched->clk_state == SLIM_CLK_PAUSED && ctrl->wakeup)
+ ret = ctrl->wakeup(ctrl);
+ if (!ret)
+ sched->clk_state = SLIM_CLK_ACTIVE;
+ mutex_unlock(&sched->m_reconf);
+
+ return ret;
+ }
+
+ /* already paused */
+ if (ctrl->sched.clk_state == SLIM_CLK_PAUSED) {
+ mutex_unlock(&sched->m_reconf);
+ return 0;
+ }
+
+ spin_lock_irqsave(&ctrl->txn_lock, flags);
+ for (i = 0; i < SLIM_MAX_TIDS; i++) {
+ /* Pending response for a message */
+ if (idr_find(&ctrl->tid_idr, i)) {
+ spin_unlock_irqrestore(&ctrl->txn_lock, flags);
+ mutex_unlock(&sched->m_reconf);
+ return -EBUSY;
+ }
+ }
+ spin_unlock_irqrestore(&ctrl->txn_lock, flags);
+
+ sched->clk_state = SLIM_CLK_ENTERING_PAUSE;
+
+ /* clock pause sequence */
+ ret = slim_do_transfer(ctrl, &txn);
+ if (ret)
+ goto clk_pause_ret;
+
+ txn.mc = SLIM_MSG_MC_NEXT_PAUSE_CLOCK;
+ txn.rl = 4;
+ msg.num_bytes = 1;
+ msg.wbuf = &restart;
+ ret = slim_do_transfer(ctrl, &txn);
+ if (ret)
+ goto clk_pause_ret;
+
+ txn.mc = SLIM_MSG_MC_RECONFIGURE_NOW;
+ txn.rl = 3;
+ msg.num_bytes = 1;
+ msg.wbuf = NULL;
+ ret = slim_do_transfer(ctrl, &txn);
+
+clk_pause_ret:
+ if (ret) {
+ sched->clk_state = SLIM_CLK_ACTIVE;
+ } else {
+ sched->clk_state = SLIM_CLK_PAUSED;
+ complete(&sched->pause_comp);
+ }
+ mutex_unlock(&sched->m_reconf);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(slim_ctrl_clk_pause);
diff --git a/drivers/slimbus/slimbus.h b/drivers/slimbus/slimbus.h
new file mode 100644
index 000000000000..79f8e05d92dd
--- /dev/null
+++ b/drivers/slimbus/slimbus.h
@@ -0,0 +1,261 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2011-2017, The Linux Foundation
+ */
+
+#ifndef _DRIVERS_SLIMBUS_H
+#define _DRIVERS_SLIMBUS_H
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/completion.h>
+#include <linux/slimbus.h>
+
+/* Standard values per SLIMbus spec needed by controllers and devices */
+#define SLIM_CL_PER_SUPERFRAME 6144
+#define SLIM_CL_PER_SUPERFRAME_DIV8 (SLIM_CL_PER_SUPERFRAME >> 3)
+
+/* SLIMbus message types. Related to interpretation of message code. */
+#define SLIM_MSG_MT_CORE 0x0
+
+/*
+ * SLIM Broadcast header format
+ * BYTE 0: MT[7:5] RL[4:0]
+ * BYTE 1: RSVD[7] MC[6:0]
+ * BYTE 2: RSVD[7:6] DT[5:4] PI[3:0]
+ */
+#define SLIM_MSG_MT_MASK GENMASK(2, 0)
+#define SLIM_MSG_MT_SHIFT 5
+#define SLIM_MSG_RL_MASK GENMASK(4, 0)
+#define SLIM_MSG_RL_SHIFT 0
+#define SLIM_MSG_MC_MASK GENMASK(6, 0)
+#define SLIM_MSG_MC_SHIFT 0
+#define SLIM_MSG_DT_MASK GENMASK(1, 0)
+#define SLIM_MSG_DT_SHIFT 4
+
+#define SLIM_HEADER_GET_MT(b) ((b >> SLIM_MSG_MT_SHIFT) & SLIM_MSG_MT_MASK)
+#define SLIM_HEADER_GET_RL(b) ((b >> SLIM_MSG_RL_SHIFT) & SLIM_MSG_RL_MASK)
+#define SLIM_HEADER_GET_MC(b) ((b >> SLIM_MSG_MC_SHIFT) & SLIM_MSG_MC_MASK)
+#define SLIM_HEADER_GET_DT(b) ((b >> SLIM_MSG_DT_SHIFT) & SLIM_MSG_DT_MASK)
+
+/* Device management messages used by this framework */
+#define SLIM_MSG_MC_REPORT_PRESENT 0x1
+#define SLIM_MSG_MC_ASSIGN_LOGICAL_ADDRESS 0x2
+#define SLIM_MSG_MC_REPORT_ABSENT 0xF
+
+/* Clock pause Reconfiguration messages */
+#define SLIM_MSG_MC_BEGIN_RECONFIGURATION 0x40
+#define SLIM_MSG_MC_NEXT_PAUSE_CLOCK 0x4A
+#define SLIM_MSG_MC_RECONFIGURE_NOW 0x5F
+
+/* Clock pause values per SLIMbus spec */
+#define SLIM_CLK_FAST 0
+#define SLIM_CLK_CONST_PHASE 1
+#define SLIM_CLK_UNSPECIFIED 2
+
+/* Destination type Values */
+#define SLIM_MSG_DEST_LOGICALADDR 0
+#define SLIM_MSG_DEST_ENUMADDR 1
+#define SLIM_MSG_DEST_BROADCAST 3
+
+/* Standard values per SLIMbus spec needed by controllers and devices */
+#define SLIM_MAX_CLK_GEAR 10
+#define SLIM_MIN_CLK_GEAR 1
+
+/* Manager's logical address is set to 0xFF per spec */
+#define SLIM_LA_MANAGER 0xFF
+
+#define SLIM_MAX_TIDS 256
+/**
+ * struct slim_framer - Represents SLIMbus framer.
+ * Every controller may have multiple framers. There is 1 active framer device
+ * responsible for clocking the bus.
+ * Manager is responsible for framer hand-over.
+ * @dev: Driver model representation of the device.
+ * @e_addr: Enumeration address of the framer.
+ * @rootfreq: Root Frequency at which the framer can run. This is maximum
+ * frequency ('clock gear 10') at which the bus can operate.
+ * @superfreq: Superframes per root frequency. Every frame is 6144 bits.
+ */
+struct slim_framer {
+ struct device dev;
+ struct slim_eaddr e_addr;
+ int rootfreq;
+ int superfreq;
+};
+
+#define to_slim_framer(d) container_of(d, struct slim_framer, dev)
+
+/**
+ * struct slim_msg_txn - Message to be sent by the controller.
+ * This structure has packet header,
+ * payload and buffer to be filled (if any)
+ * @rl: Header field. remaining length.
+ * @mt: Header field. Message type.
+ * @mc: Header field. LSB is message code for type mt.
+ * @dt: Header field. Destination type.
+ * @ec: Element code. Used for elemental access APIs.
+ * @tid: Transaction ID. Used for messages expecting response.
+ * (relevant for message-codes involving read operation)
+ * @la: Logical address of the device this message is going to.
+ * (Not used when destination type is broadcast.)
+ * @msg: Elemental access message to be read/written
+ * @comp: completion if read/write is synchronous, used internally
+ * for tid based transactions.
+ */
+struct slim_msg_txn {
+ u8 rl;
+ u8 mt;
+ u8 mc;
+ u8 dt;
+ u16 ec;
+ u8 tid;
+ u8 la;
+ struct slim_val_inf *msg;
+ struct completion *comp;
+};
+
+/* Frequently used message transaction structures */
+#define DEFINE_SLIM_LDEST_TXN(name, mc, rl, la, msg) \
+ struct slim_msg_txn name = { rl, 0, mc, SLIM_MSG_DEST_LOGICALADDR, 0,\
+ 0, la, msg, }
+
+#define DEFINE_SLIM_BCAST_TXN(name, mc, rl, la, msg) \
+ struct slim_msg_txn name = { rl, 0, mc, SLIM_MSG_DEST_BROADCAST, 0,\
+ 0, la, msg, }
+
+#define DEFINE_SLIM_EDEST_TXN(name, mc, rl, la, msg) \
+ struct slim_msg_txn name = { rl, 0, mc, SLIM_MSG_DEST_ENUMADDR, 0,\
+ 0, la, msg, }
+/**
+ * enum slim_clk_state: SLIMbus controller's clock state used internally for
+ * maintaining current clock state.
+ * @SLIM_CLK_ACTIVE: SLIMbus clock is active
+ * @SLIM_CLK_ENTERING_PAUSE: SLIMbus clock pause sequence is being sent on the
+ * bus. If this succeeds, state changes to SLIM_CLK_PAUSED. If the
+ * transition fails, state changes back to SLIM_CLK_ACTIVE
+ * @SLIM_CLK_PAUSED: SLIMbus controller clock has paused.
+ */
+enum slim_clk_state {
+ SLIM_CLK_ACTIVE,
+ SLIM_CLK_ENTERING_PAUSE,
+ SLIM_CLK_PAUSED,
+};
+
+/**
+ * struct slim_sched: Framework uses this structure internally for scheduling.
+ * @clk_state: Controller's clock state from enum slim_clk_state
+ * @pause_comp: Signals completion of clock pause sequence. This is useful when
+ * client tries to call SLIMbus transaction when controller is entering
+ * clock pause.
+ * @m_reconf: This mutex is held until current reconfiguration (data channel
+ * scheduling, message bandwidth reservation) is done. Message APIs can
+ * use the bus concurrently when this mutex is held since elemental access
+ * messages can be sent on the bus when reconfiguration is in progress.
+ */
+struct slim_sched {
+ enum slim_clk_state clk_state;
+ struct completion pause_comp;
+ struct mutex m_reconf;
+};
+
+/**
+ * struct slim_controller - Controls every instance of SLIMbus
+ * (similar to 'master' on SPI)
+ * @dev: Device interface to this driver
+ * @id: Board-specific number identifier for this controller/bus
+ * @name: Name for this controller
+ * @min_cg: Minimum clock gear supported by this controller (default value: 1)
+ * @max_cg: Maximum clock gear supported by this controller (default value: 10)
+ * @clkgear: Current clock gear in which this bus is running
+ * @laddr_ida: logical address id allocator
+ * @a_framer: Active framer which is clocking the bus managed by this controller
+ * @lock: Mutex protecting controller data structures
+ * @devices: Slim device list
+ * @tid_idr: tid id allocator
+ * @txn_lock: Lock to protect table of transactions
+ * @sched: scheduler structure used by the controller
+ * @xfer_msg: Transfer a message on this controller (this can be a broadcast
+ * control/status message like data channel setup, or a unicast message
+ * like value element read/write).
+ * @set_laddr: Set up the logical address laddr for the slave with
+ * enumeration address e_addr. Drivers implementing a controller are
+ * expected to send a unicast message to this device with its logical
+ * address.
+ * @get_laddr: It is possible that the controller needs a fixed logical
+ * address table; get_laddr can be used in that case so that the
+ * controller can do this assignment. The use case is a master on the
+ * remote processor side, which is responsible for allocating the laddr.
+ * @wakeup: This function pointer implements controller-specific procedure
+ * to wake it up from clock-pause. Framework will call this to bring
+ * the controller out of clock pause.
+ *
+ * 'Manager device' is responsible for device management, bandwidth
+ * allocation, channel setup, and port associations per channel.
+ * Device management means Logical address assignment/removal based on
+ * enumeration (report-present, report-absent) of a device.
+ * Bandwidth allocation is done dynamically by the manager, based on active
+ * channels on the bus and message-bandwidth requests made by SLIMbus devices.
+ * Based on current bandwidth usage, manager chooses a frequency to run
+ * the bus at (in steps of 'clock-gear', 1 through 10, each clock gear
+ * representing twice the frequency of the previous gear).
+ * Manager is also responsible for entering (and exiting) low-power-mode
+ * (known as 'clock pause').
+ * The manager can also hand over the framer role when there are multiple
+ * framers on the bus and a given use case warrants a particular framer,
+ * e.g. to avoid keeping the previous framer powered on.
+ *
+ * The controller here performs the duties of both the manager device and
+ * the 'interface device'. The interface device is responsible for
+ * monitoring the bus and reporting information such as
+ * loss-of-synchronization and data slot-collision.
+ */
+struct slim_controller {
+ struct device *dev;
+ unsigned int id;
+ char name[SLIMBUS_NAME_SIZE];
+ int min_cg;
+ int max_cg;
+ int clkgear;
+ struct ida laddr_ida;
+ struct slim_framer *a_framer;
+ struct mutex lock;
+ struct list_head devices;
+ struct idr tid_idr;
+ spinlock_t txn_lock;
+ struct slim_sched sched;
+ int (*xfer_msg)(struct slim_controller *ctrl,
+ struct slim_msg_txn *tx);
+ int (*set_laddr)(struct slim_controller *ctrl,
+ struct slim_eaddr *ea, u8 laddr);
+ int (*get_laddr)(struct slim_controller *ctrl,
+ struct slim_eaddr *ea, u8 *laddr);
+ int (*wakeup)(struct slim_controller *ctrl);
+};
+
+int slim_device_report_present(struct slim_controller *ctrl,
+ struct slim_eaddr *e_addr, u8 *laddr);
+void slim_report_absent(struct slim_device *sbdev);
+int slim_register_controller(struct slim_controller *ctrl);
+int slim_unregister_controller(struct slim_controller *ctrl);
+void slim_msg_response(struct slim_controller *ctrl, u8 *reply, u8 tid, u8 l);
+int slim_do_transfer(struct slim_controller *ctrl, struct slim_msg_txn *txn);
+int slim_ctrl_clk_pause(struct slim_controller *ctrl, bool wakeup, u8 restart);
+
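+/*
+ * Illustrative controller registration flow (a sketch; 'pdev' and the
+ * callback names are hypothetical):
+ *
+ *	ctrl->dev = &pdev->dev;
+ *	ctrl->xfer_msg = my_xfer_msg;
+ *	ctrl->set_laddr = my_set_laddr;
+ *	ret = slim_register_controller(ctrl);
+ *	...
+ *	slim_unregister_controller(ctrl);
+ */
+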
+static inline bool slim_tid_txn(u8 mt, u8 mc)
+{
+ return (mt == SLIM_MSG_MT_CORE &&
+ (mc == SLIM_MSG_MC_REQUEST_INFORMATION ||
+ mc == SLIM_MSG_MC_REQUEST_CLEAR_INFORMATION ||
+ mc == SLIM_MSG_MC_REQUEST_VALUE ||
+ mc == SLIM_MSG_MC_REQUEST_CHANGE_VALUE));
+}
+
+static inline bool slim_ec_txn(u8 mt, u8 mc)
+{
+ return (mt == SLIM_MSG_MT_CORE &&
+ ((mc >= SLIM_MSG_MC_REQUEST_INFORMATION &&
+ mc <= SLIM_MSG_MC_REPORT_INFORMATION) ||
+ (mc >= SLIM_MSG_MC_REQUEST_VALUE &&
+ mc <= SLIM_MSG_MC_CHANGE_VALUE)));
+}
+#endif /* _LINUX_SLIMBUS_H */
diff --git a/drivers/soundwire/Kconfig b/drivers/soundwire/Kconfig
new file mode 100644
index 000000000000..b46084b4b1f8
--- /dev/null
+++ b/drivers/soundwire/Kconfig
@@ -0,0 +1,37 @@
+#
+# SoundWire subsystem configuration
+#
+
+menuconfig SOUNDWIRE
+ bool "SoundWire support"
+ ---help---
+ SoundWire is a two-pin interface, with data and clock lines, ratified
+ by the MIPI Alliance. SoundWire is used for transporting data
+ typically related to audio functions. The SoundWire interface is
+ optimized to integrate audio devices in mobile or mobile-inspired
+ systems. Say Y to enable this subsystem, or N if you do not have such
+ a device.
+
+if SOUNDWIRE
+
+comment "SoundWire Devices"
+
+config SOUNDWIRE_BUS
+ tristate
+ select REGMAP_SOUNDWIRE
+
+config SOUNDWIRE_CADENCE
+ tristate
+
+config SOUNDWIRE_INTEL
+ tristate "Intel SoundWire Master driver"
+ select SOUNDWIRE_CADENCE
+ select SOUNDWIRE_BUS
+ depends on X86 && ACPI
+ ---help---
+ SoundWire Intel Master driver.
+ If you have an Intel platform with a SoundWire Master, enable this
+ config option to get SoundWire support for that device.
+
+endif
diff --git a/drivers/soundwire/Makefile b/drivers/soundwire/Makefile
new file mode 100644
index 000000000000..e1a74c5692aa
--- /dev/null
+++ b/drivers/soundwire/Makefile
@@ -0,0 +1,18 @@
+#
+# Makefile for soundwire core
+#
+
+#Bus Objs
+soundwire-bus-objs := bus_type.o bus.o slave.o mipi_disco.o
+obj-$(CONFIG_SOUNDWIRE_BUS) += soundwire-bus.o
+
+#Cadence Objs
+soundwire-cadence-objs := cadence_master.o
+obj-$(CONFIG_SOUNDWIRE_CADENCE) += soundwire-cadence.o
+
+#Intel driver
+soundwire-intel-objs := intel.o
+obj-$(CONFIG_SOUNDWIRE_INTEL) += soundwire-intel.o
+
+soundwire-intel-init-objs := intel_init.o
+obj-$(CONFIG_SOUNDWIRE_INTEL) += soundwire-intel-init.o
diff --git a/drivers/soundwire/bus.c b/drivers/soundwire/bus.c
new file mode 100644
index 000000000000..d6dc8e7a8614
--- /dev/null
+++ b/drivers/soundwire/bus.c
@@ -0,0 +1,997 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+// Copyright(c) 2015-17 Intel Corporation.
+
+#include <linux/acpi.h>
+#include <linux/mod_devicetable.h>
+#include <linux/pm_runtime.h>
+#include <linux/soundwire/sdw_registers.h>
+#include <linux/soundwire/sdw.h>
+#include "bus.h"
+
+/**
+ * sdw_add_bus_master() - add a bus Master instance
+ * @bus: bus instance
+ *
+ * Initializes the bus instance, read properties and create child
+ * devices.
+ */
+int sdw_add_bus_master(struct sdw_bus *bus)
+{
+ int ret;
+
+ if (!bus->dev) {
+ pr_err("SoundWire bus has no device");
+ return -ENODEV;
+ }
+
+ if (!bus->ops) {
+ dev_err(bus->dev, "SoundWire Bus ops are not set");
+ return -EINVAL;
+ }
+
+ mutex_init(&bus->msg_lock);
+ mutex_init(&bus->bus_lock);
+ INIT_LIST_HEAD(&bus->slaves);
+
+ if (bus->ops->read_prop) {
+ ret = bus->ops->read_prop(bus);
+ if (ret < 0) {
+ dev_err(bus->dev, "Bus read properties failed:%d", ret);
+ return ret;
+ }
+ }
+
+ /*
+ * Device numbers in SoundWire are 0 through 15. Enumeration device
+ * number (0), Broadcast device number (15), Group numbers (12 and
+ * 13) and Master device number (14) are not used for assignment so
+ * mask these and other higher bits.
+ */
+
+ /* Set higher order bits */
+ *bus->assigned = ~GENMASK(SDW_BROADCAST_DEV_NUM, SDW_ENUM_DEV_NUM);
+
+ /* Set enumeration device number and broadcast device number */
+ set_bit(SDW_ENUM_DEV_NUM, bus->assigned);
+ set_bit(SDW_BROADCAST_DEV_NUM, bus->assigned);
+
+ /* Set group device numbers and master device number */
+ set_bit(SDW_GROUP12_DEV_NUM, bus->assigned);
+ set_bit(SDW_GROUP13_DEV_NUM, bus->assigned);
+ set_bit(SDW_MASTER_DEV_NUM, bus->assigned);
+
+ /*
+ * SDW is an enumerable bus, but devices can be powered off. So,
+ * they won't be able to report as present.
+ *
+ * Create Slave devices based on Slaves described in
+ * the respective firmware (ACPI/DT)
+ */
+ if (IS_ENABLED(CONFIG_ACPI) && ACPI_HANDLE(bus->dev))
+ ret = sdw_acpi_find_slaves(bus);
+ else
+ ret = -ENOTSUPP; /* No ACPI/DT so error out */
+
+ if (ret) {
+ dev_err(bus->dev, "Finding slaves failed:%d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(sdw_add_bus_master);
+
+static int sdw_delete_slave(struct device *dev, void *data)
+{
+ struct sdw_slave *slave = dev_to_sdw_dev(dev);
+ struct sdw_bus *bus = slave->bus;
+
+ mutex_lock(&bus->bus_lock);
+
+ if (slave->dev_num) /* clear dev_num if assigned */
+ clear_bit(slave->dev_num, bus->assigned);
+
+ list_del_init(&slave->node);
+ mutex_unlock(&bus->bus_lock);
+
+ device_unregister(dev);
+ return 0;
+}
+
+/**
+ * sdw_delete_bus_master() - delete the bus master instance
+ * @bus: bus to be deleted
+ *
+ * Remove the instance, delete the child devices.
+ */
+void sdw_delete_bus_master(struct sdw_bus *bus)
+{
+ device_for_each_child(bus->dev, NULL, sdw_delete_slave);
+}
+EXPORT_SYMBOL(sdw_delete_bus_master);
+
+/*
+ * SDW IO Calls
+ */
+
+static inline int find_response_code(enum sdw_command_response resp)
+{
+ switch (resp) {
+ case SDW_CMD_OK:
+ return 0;
+
+ case SDW_CMD_IGNORED:
+ return -ENODATA;
+
+ case SDW_CMD_TIMEOUT:
+ return -ETIMEDOUT;
+
+ default:
+ return -EIO;
+ }
+}
+
+static inline int do_transfer(struct sdw_bus *bus, struct sdw_msg *msg)
+{
+ int retry = bus->prop.err_threshold;
+ enum sdw_command_response resp;
+ int ret = 0, i;
+
+ for (i = 0; i <= retry; i++) {
+ resp = bus->ops->xfer_msg(bus, msg);
+ ret = find_response_code(resp);
+
+ /* if cmd is ok or ignored return */
+ if (ret == 0 || ret == -ENODATA)
+ return ret;
+ }
+
+ return ret;
+}
+
+static inline int do_transfer_defer(struct sdw_bus *bus,
+ struct sdw_msg *msg, struct sdw_defer *defer)
+{
+ int retry = bus->prop.err_threshold;
+ enum sdw_command_response resp;
+ int ret = 0, i;
+
+ defer->msg = msg;
+ defer->length = msg->len;
+
+ for (i = 0; i <= retry; i++) {
+ resp = bus->ops->xfer_msg_defer(bus, msg, defer);
+ ret = find_response_code(resp);
+ /* if cmd is ok or ignored return */
+ if (ret == 0 || ret == -ENODATA)
+ return ret;
+ }
+
+ return ret;
+}
+
+static int sdw_reset_page(struct sdw_bus *bus, u16 dev_num)
+{
+ int retry = bus->prop.err_threshold;
+ enum sdw_command_response resp;
+ int ret = 0, i;
+
+ for (i = 0; i <= retry; i++) {
+ resp = bus->ops->reset_page_addr(bus, dev_num);
+ ret = find_response_code(resp);
+ /* if cmd is ok or ignored return */
+ if (ret == 0 || ret == -ENODATA)
+ return ret;
+ }
+
+ return ret;
+}
+
+/**
+ * sdw_transfer() - Synchronously transfer a message to an SDW Slave device
+ * @bus: SDW bus
+ * @msg: SDW message to be xfered
+ */
+int sdw_transfer(struct sdw_bus *bus, struct sdw_msg *msg)
+{
+ int ret;
+
+ mutex_lock(&bus->msg_lock);
+
+ ret = do_transfer(bus, msg);
+ if (ret != 0 && ret != -ENODATA)
+ dev_err(bus->dev, "trf on Slave %d failed:%d\n",
+ msg->dev_num, ret);
+
+ if (msg->page)
+ sdw_reset_page(bus, msg->dev_num);
+
+ mutex_unlock(&bus->msg_lock);
+
+ return ret;
+}
+
+/**
+ * sdw_transfer_defer() - Asynchronously transfer a message to an SDW Slave device
+ * @bus: SDW bus
+ * @msg: SDW message to be xfered
+ * @defer: Defer block for signal completion
+ *
+ * Caller must hold msg_lock while calling this
+ */
+int sdw_transfer_defer(struct sdw_bus *bus, struct sdw_msg *msg,
+ struct sdw_defer *defer)
+{
+ int ret;
+
+ if (!bus->ops->xfer_msg_defer)
+ return -ENOTSUPP;
+
+ ret = do_transfer_defer(bus, msg, defer);
+ if (ret != 0 && ret != -ENODATA)
+ dev_err(bus->dev, "Defer trf on Slave %d failed:%d\n",
+ msg->dev_num, ret);
+
+ if (msg->page)
+ sdw_reset_page(bus, msg->dev_num);
+
+ return ret;
+}
+
+int sdw_fill_msg(struct sdw_msg *msg, struct sdw_slave *slave,
+ u32 addr, size_t count, u16 dev_num, u8 flags, u8 *buf)
+{
+ memset(msg, 0, sizeof(*msg));
+ msg->addr = addr; /* addr is 16 bit and truncated here */
+ msg->len = count;
+ msg->dev_num = dev_num;
+ msg->flags = flags;
+ msg->buf = buf;
+ msg->ssp_sync = false;
+ msg->page = false;
+
+ if (addr < SDW_REG_NO_PAGE) { /* no paging area */
+ return 0;
+ } else if (addr >= SDW_REG_MAX) { /* illegal addr */
+ pr_err("SDW: Invalid address %x passed\n", addr);
+ return -EINVAL;
+ }
+
+ if (addr < SDW_REG_OPTIONAL_PAGE) { /* 32k but no page */
+ if (slave && !slave->prop.paging_support)
+ return 0;
+ /* no need for an else, as that will fall through to paging */
+ }
+
+ /* paging mandatory */
+ if (dev_num == SDW_ENUM_DEV_NUM || dev_num == SDW_BROADCAST_DEV_NUM) {
+ pr_err("SDW: Invalid device for paging :%d\n", dev_num);
+ return -EINVAL;
+ }
+
+ if (!slave) {
+ pr_err("SDW: No slave for paging addr\n");
+ return -EINVAL;
+ } else if (!slave->prop.paging_support) {
+ dev_err(&slave->dev,
+ "address %x needs paging but no support", addr);
+ return -EINVAL;
+ }
+
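+ /* split the upper address bits across the two SCP address-page fields */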
+ msg->addr_page1 = (addr >> SDW_REG_SHIFT(SDW_SCP_ADDRPAGE1_MASK));
+ msg->addr_page2 = (addr >> SDW_REG_SHIFT(SDW_SCP_ADDRPAGE2_MASK));
+ msg->addr |= BIT(15);
+ msg->page = true;
+
+ return 0;
+}
+
+/**
+ * sdw_nread() - Read "n" contiguous SDW Slave registers
+ * @slave: SDW Slave
+ * @addr: Register address
+ * @count: number of registers to read
+ * @val: Buffer for values to be read
+ */
+int sdw_nread(struct sdw_slave *slave, u32 addr, size_t count, u8 *val)
+{
+ struct sdw_msg msg;
+ int ret;
+
+ ret = sdw_fill_msg(&msg, slave, addr, count,
+ slave->dev_num, SDW_MSG_FLAG_READ, val);
+ if (ret < 0)
+ return ret;
+
+ ret = pm_runtime_get_sync(slave->bus->dev);
+ if (ret < 0)
+ return ret;
+
+ ret = sdw_transfer(slave->bus, &msg);
+ pm_runtime_put(slave->bus->dev);
+
+ return ret;
+}
+EXPORT_SYMBOL(sdw_nread);
+
+/**
+ * sdw_nwrite() - Write "n" contiguous SDW Slave registers
+ * @slave: SDW Slave
+ * @addr: Register address
+ * @count: number of registers to write
+ * @val: Buffer of values to be written
+ */
+int sdw_nwrite(struct sdw_slave *slave, u32 addr, size_t count, u8 *val)
+{
+ struct sdw_msg msg;
+ int ret;
+
+ ret = sdw_fill_msg(&msg, slave, addr, count,
+ slave->dev_num, SDW_MSG_FLAG_WRITE, val);
+ if (ret < 0)
+ return ret;
+
+ ret = pm_runtime_get_sync(slave->bus->dev);
+ if (ret < 0)
+ return ret;
+
+ ret = sdw_transfer(slave->bus, &msg);
+ pm_runtime_put(slave->bus->dev);
+
+ return ret;
+}
+EXPORT_SYMBOL(sdw_nwrite);
+
+/**
+ * sdw_read() - Read a SDW Slave register
+ * @slave: SDW Slave
+ * @addr: Register address
+ */
+int sdw_read(struct sdw_slave *slave, u32 addr)
+{
+ u8 buf;
+ int ret;
+
+ ret = sdw_nread(slave, addr, 1, &buf);
+ if (ret < 0)
+ return ret;
+ else
+ return buf;
+}
+EXPORT_SYMBOL(sdw_read);
+
+/**
+ * sdw_write() - Write a SDW Slave register
+ * @slave: SDW Slave
+ * @addr: Register address
+ * @value: Register value
+ */
+int sdw_write(struct sdw_slave *slave, u32 addr, u8 value)
+{
+ return sdw_nwrite(slave, addr, 1, &value);
+}
+EXPORT_SYMBOL(sdw_write);
+
+/*
+ * SDW alert handling
+ */
+
+/* called with bus_lock held */
+static struct sdw_slave *sdw_get_slave(struct sdw_bus *bus, int i)
+{
+ struct sdw_slave *slave = NULL;
+
+ list_for_each_entry(slave, &bus->slaves, node) {
+ if (slave->dev_num == i)
+ return slave;
+ }
+
+ return NULL;
+}
+
+static int sdw_compare_devid(struct sdw_slave *slave, struct sdw_slave_id id)
+{
+ if ((slave->id.unique_id != id.unique_id) ||
+ (slave->id.mfg_id != id.mfg_id) ||
+ (slave->id.part_id != id.part_id) ||
+ (slave->id.class_id != id.class_id))
+ return -ENODEV;
+
+ return 0;
+}
+
+/* called with bus_lock held */
+static int sdw_get_device_num(struct sdw_slave *slave)
+{
+ int bit;
+
+ bit = find_first_zero_bit(slave->bus->assigned, SDW_MAX_DEVICES);
+ if (bit == SDW_MAX_DEVICES) {
+ bit = -ENODEV;
+ goto err;
+ }
+
+ /*
+ * Do not update dev_num in the Slave data structure here;
+ * update it only once programming the dev_num has succeeded.
+ */
+ set_bit(bit, slave->bus->assigned);
+
+err:
+ return bit;
+}
+
+static int sdw_assign_device_num(struct sdw_slave *slave)
+{
+ int ret, dev_num;
+
+ /* check first if device number is assigned, if so reuse that */
+ if (!slave->dev_num) {
+ mutex_lock(&slave->bus->bus_lock);
+ dev_num = sdw_get_device_num(slave);
+ mutex_unlock(&slave->bus->bus_lock);
+ if (dev_num < 0) {
+ dev_err(slave->bus->dev, "Get dev_num failed: %d",
+ dev_num);
+ return dev_num;
+ }
+ } else {
+ dev_info(slave->bus->dev,
+ "Slave already registered dev_num:%d",
+ slave->dev_num);
+
+ /* Clear the slave->dev_num to transfer message on device 0 */
+ dev_num = slave->dev_num;
+ slave->dev_num = 0;
+ }
+
+ ret = sdw_write(slave, SDW_SCP_DEVNUMBER, dev_num);
+ if (ret < 0) {
+ dev_err(&slave->dev, "Program device_num failed: %d", ret);
+ return ret;
+ }
+
+ /* After xfer of msg, restore dev_num */
+ slave->dev_num = dev_num;
+
+ return 0;
+}
+
+void sdw_extract_slave_id(struct sdw_bus *bus,
+ u64 addr, struct sdw_slave_id *id)
+{
+ dev_dbg(bus->dev, "SDW Slave Addr: %llx", addr);
+
+ /*
+ * Spec definition
+ * Register Bit Contents
+ * DevId_0 [7:4] 47:44 sdw_version
+ * DevId_0 [3:0] 43:40 unique_id
+ * DevId_1 39:32 mfg_id [15:8]
+ * DevId_2 31:24 mfg_id [7:0]
+ * DevId_3 23:16 part_id [15:8]
+ * DevId_4 15:08 part_id [7:0]
+ * DevId_5 07:00 class_id
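+ *
+ * Illustrative decode: addr 0x1F025D123400 gives sdw_version 0x1,
+ * unique_id 0xF, mfg_id 0x025D, part_id 0x1234, class_id 0x00.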
+ */
+ id->sdw_version = (addr >> 44) & GENMASK(3, 0);
+ id->unique_id = (addr >> 40) & GENMASK(3, 0);
+ id->mfg_id = (addr >> 24) & GENMASK(15, 0);
+ id->part_id = (addr >> 8) & GENMASK(15, 0);
+ id->class_id = addr & GENMASK(7, 0);
+
+ dev_dbg(bus->dev,
+ "SDW Slave class_id %x, part_id %x, mfg_id %x, unique_id %x, version %x",
+ id->class_id, id->part_id, id->mfg_id,
+ id->unique_id, id->sdw_version);
+}
+
+static int sdw_program_device_num(struct sdw_bus *bus)
+{
+ u8 buf[SDW_NUM_DEV_ID_REGISTERS] = {0};
+ struct sdw_slave *slave, *_s;
+ struct sdw_slave_id id;
+ struct sdw_msg msg;
+ bool found = false;
+ int count = 0, ret;
+ u64 addr;
+
+ /* No Slave, so use raw xfer api */
+ ret = sdw_fill_msg(&msg, NULL, SDW_SCP_DEVID_0,
+ SDW_NUM_DEV_ID_REGISTERS, 0, SDW_MSG_FLAG_READ, buf);
+ if (ret < 0)
+ return ret;
+
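+ /*
+ * Reads target device number 0 (the enumeration address); once a
+ * Slave is programmed with a real device number it stops responding
+ * there, letting the next unenumerated Slave answer.
+ */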
+ do {
+ ret = sdw_transfer(bus, &msg);
+ if (ret == -ENODATA) { /* end of device id reads */
+ ret = 0;
+ break;
+ }
+ if (ret < 0) {
+ dev_err(bus->dev, "DEVID read fail:%d\n", ret);
+ break;
+ }
+
+ /*
+ * Construct the 48-bit address from the DevId bytes; cast the
+ * higher-shifted bytes to u64 to avoid truncation.
+ */
+ addr = buf[5] | (buf[4] << 8) | (buf[3] << 16) |
+ ((u64)buf[2] << 24) | ((u64)buf[1] << 32) |
+ ((u64)buf[0] << 40);
+
+ sdw_extract_slave_id(bus, addr, &id);
+
+ /* Now compare with entries */
+ list_for_each_entry_safe(slave, _s, &bus->slaves, node) {
+ if (sdw_compare_devid(slave, id) == 0) {
+ found = true;
+
+ /*
+ * Assign a new dev_num to this Slave but do
+ * not mark it present yet. It will be marked
+ * present after it reports ATTACHED on the
+ * new dev_num.
+ */
+ ret = sdw_assign_device_num(slave);
+ if (ret) {
+ dev_err(slave->bus->dev,
+ "Assign dev_num failed:%d",
+ ret);
+ return ret;
+ }
+
+ break;
+ }
+ }
+
+ if (!found) {
+ /* TODO: Park this device in Group 13 */
+ dev_err(bus->dev, "Slave Entry not found");
+ }
+
+ count++;
+
+ /*
+ * Loop until an error occurs or the retry count is
+ * exhausted. A device can drop off and rejoin during
+ * enumeration, so allow up to twice the device limit.
+ */
+ } while (ret == 0 && count < (SDW_MAX_DEVICES * 2));
+
+ return ret;
+}
+
+static void sdw_modify_slave_status(struct sdw_slave *slave,
+ enum sdw_slave_status status)
+{
+ mutex_lock(&slave->bus->bus_lock);
+ slave->status = status;
+ mutex_unlock(&slave->bus->bus_lock);
+}
+
+static int sdw_initialize_slave(struct sdw_slave *slave)
+{
+ struct sdw_slave_prop *prop = &slave->prop;
+ int ret;
+ u8 val;
+
+ /*
+ * Set bus clash, parity and SCP implementation
+ * defined interrupt mask
+ * TODO: Read implementation defined interrupt mask
+ * from Slave property
+ */
+ val = SDW_SCP_INT1_IMPL_DEF | SDW_SCP_INT1_BUS_CLASH |
+ SDW_SCP_INT1_PARITY;
+
+ /* Enable SCP interrupts */
+ ret = sdw_update(slave, SDW_SCP_INTMASK1, val, val);
+ if (ret < 0) {
+ dev_err(slave->bus->dev,
+ "SDW_SCP_INTMASK1 write failed:%d", ret);
+ return ret;
+ }
+
+ /* No need to continue if DP0 is not present */
+ if (!slave->prop.dp0_prop)
+ return 0;
+
+ /* Enable DP0 interrupts */
+ val = prop->dp0_prop->device_interrupts;
+ val |= SDW_DP0_INT_PORT_READY | SDW_DP0_INT_BRA_FAILURE;
+
+ ret = sdw_update(slave, SDW_DP0_INTMASK, val, val);
+ if (ret < 0) {
+ dev_err(slave->bus->dev,
+ "SDW_DP0_INTMASK read failed:%d", ret);
+ return val;
+ }
+
+ return 0;
+}
+
+static int sdw_handle_dp0_interrupt(struct sdw_slave *slave, u8 *slave_status)
+{
+ u8 clear = 0, impl_int_mask;
+ int status, status2, ret, count = 0;
+
+ status = sdw_read(slave, SDW_DP0_INT);
+ if (status < 0) {
+ dev_err(slave->bus->dev,
+ "SDW_DP0_INT read failed:%d", status);
+ return status;
+ }
+
+ do {
+ if (status & SDW_DP0_INT_TEST_FAIL) {
+ dev_err(&slave->dev, "Test fail for port 0");
+ clear |= SDW_DP0_INT_TEST_FAIL;
+ }
+
+ /*
+ * Assumption: PORT_READY interrupt will be received only for
+ * ports implementing Channel Prepare state machine (CP_SM)
+ */
+
+ if (status & SDW_DP0_INT_PORT_READY) {
+ complete(&slave->port_ready[0]);
+ clear |= SDW_DP0_INT_PORT_READY;
+ }
+
+ if (status & SDW_DP0_INT_BRA_FAILURE) {
+ dev_err(&slave->dev, "BRA failed");
+ clear |= SDW_DP0_INT_BRA_FAILURE;
+ }
+
+ impl_int_mask = SDW_DP0_INT_IMPDEF1 |
+ SDW_DP0_INT_IMPDEF2 | SDW_DP0_INT_IMPDEF3;
+
+ if (status & impl_int_mask) {
+ clear |= impl_int_mask;
+ *slave_status = clear;
+ }
+
+ /* clear the interrupt */
+ ret = sdw_write(slave, SDW_DP0_INT, clear);
+ if (ret < 0) {
+ dev_err(slave->bus->dev,
+ "SDW_DP0_INT write failed:%d", ret);
+ return ret;
+ }
+
+ /* Read DP0 interrupt again */
+ status2 = sdw_read(slave, SDW_DP0_INT);
+ if (status2 < 0) {
+ dev_err(slave->bus->dev,
+ "SDW_DP0_INT read failed:%d", status2);
+ return status2;
+ }
+ status &= status2;
+
+ count++;
+
+ /* we can get alerts while processing so keep retrying */
+ } while (status != 0 && count < SDW_READ_INTR_CLEAR_RETRY);
+
+ if (count == SDW_READ_INTR_CLEAR_RETRY)
+ dev_warn(slave->bus->dev, "Reached MAX_RETRY on DP0 read");
+
+ return ret;
+}
+
+static int sdw_handle_port_interrupt(struct sdw_slave *slave,
+ int port, u8 *slave_status)
+{
+ u8 clear = 0, impl_int_mask;
+ int status, status2, ret, count = 0;
+ u32 addr;
+
+ if (port == 0)
+ return sdw_handle_dp0_interrupt(slave, slave_status);
+
+ addr = SDW_DPN_INT(port);
+ status = sdw_read(slave, addr);
+ if (status < 0) {
+ dev_err(slave->bus->dev,
+ "SDW_DPN_INT read failed:%d", status);
+
+ return status;
+ }
+
+ do {
+ if (status & SDW_DPN_INT_TEST_FAIL) {
+ dev_err(&slave->dev, "Test fail for port:%d", port);
+ clear |= SDW_DPN_INT_TEST_FAIL;
+ }
+
+ /*
+ * Assumption: PORT_READY interrupt will be received only
+ * for ports implementing CP_SM.
+ */
+ if (status & SDW_DPN_INT_PORT_READY) {
+ complete(&slave->port_ready[port]);
+ clear |= SDW_DPN_INT_PORT_READY;
+ }
+
+ impl_int_mask = SDW_DPN_INT_IMPDEF1 |
+ SDW_DPN_INT_IMPDEF2 | SDW_DPN_INT_IMPDEF3;
+
+ if (status & impl_int_mask) {
+ clear |= impl_int_mask;
+ *slave_status = clear;
+ }
+
+ /* clear the interrupt */
+ ret = sdw_write(slave, addr, clear);
+ if (ret < 0) {
+ dev_err(slave->bus->dev,
+ "SDW_DPN_INT write failed:%d", ret);
+ return ret;
+ }
+
+ /* Read DPN interrupt again */
+ status2 = sdw_read(slave, addr);
+ if (status2 < 0) {
+ dev_err(slave->bus->dev,
+ "SDW_DPN_INT read failed:%d", status2);
+ return status2;
+ }
+ status &= status2;
+
+ count++;
+
+ /* we can get alerts while processing so keep retrying */
+ } while (status != 0 && count < SDW_READ_INTR_CLEAR_RETRY);
+
+ if (count == SDW_READ_INTR_CLEAR_RETRY)
+ dev_warn(slave->bus->dev, "Reached MAX_RETRY on port read");
+
+ return ret;
+}
+
+static int sdw_handle_slave_alerts(struct sdw_slave *slave)
+{
+ struct sdw_slave_intr_status slave_intr;
+ u8 clear = 0, bit, port_status[15];
+ int port_num, stat, ret, count = 0;
+ unsigned long port;
+ bool slave_notify = false;
+ u8 buf, buf2[2], _buf, _buf2[2];
+
+ sdw_modify_slave_status(slave, SDW_SLAVE_ALERT);
+
+ /* Read Instat 1, Instat 2 and Instat 3 registers */
+ buf = ret = sdw_read(slave, SDW_SCP_INT1);
+ if (ret < 0) {
+ dev_err(slave->bus->dev,
+ "SDW_SCP_INT1 read failed:%d", ret);
+ return ret;
+ }
+
+ ret = sdw_nread(slave, SDW_SCP_INTSTAT2, 2, buf2);
+ if (ret < 0) {
+ dev_err(slave->bus->dev,
+ "SDW_SCP_INT2/3 read failed:%d", ret);
+ return ret;
+ }
+
+ do {
+ /*
+ * Check parity, bus clash and Slave (impl defined)
+ * interrupt
+ */
+ if (buf & SDW_SCP_INT1_PARITY) {
+ dev_err(&slave->dev, "Parity error detected");
+ clear |= SDW_SCP_INT1_PARITY;
+ }
+
+ if (buf & SDW_SCP_INT1_BUS_CLASH) {
+ dev_err(&slave->dev, "Bus clash error detected");
+ clear |= SDW_SCP_INT1_BUS_CLASH;
+ }
+
+ /*
+ * When bus clash or parity errors are detected, such errors
+ * are unlikely to be recoverable errors.
+ * TODO: In such scenario, reset bus. Make this configurable
+ * via sysfs property with bus reset being the default.
+ */
+
+ if (buf & SDW_SCP_INT1_IMPL_DEF) {
+ dev_dbg(&slave->dev, "Slave impl defined interrupt\n");
+ clear |= SDW_SCP_INT1_IMPL_DEF;
+ slave_notify = true;
+ }
+
+ /* Check port 0 - 3 interrupts */
+ port = buf & SDW_SCP_INT1_PORT0_3;
+
+ /* To get port number corresponding to bits, shift it */
+ port = port >> SDW_REG_SHIFT(SDW_SCP_INT1_PORT0_3);
+ for_each_set_bit(bit, &port, 8) {
+ sdw_handle_port_interrupt(slave, bit,
+ &port_status[bit]);
+ }
+
+ /* Check if cascade 2 interrupt is present */
+ if (buf & SDW_SCP_INT1_SCP2_CASCADE) {
+ port = buf2[0] & SDW_SCP_INTSTAT2_PORT4_10;
+ for_each_set_bit(bit, &port, 8) {
+ /* scp2 ports start from 4 */
+ port_num = bit + 3;
+ sdw_handle_port_interrupt(slave,
+ port_num,
+ &port_status[port_num]);
+ }
+ }
+
+ /* now check last cascade */
+ if (buf2[0] & SDW_SCP_INTSTAT2_SCP3_CASCADE) {
+ port = buf2[1] & SDW_SCP_INTSTAT3_PORT11_14;
+ for_each_set_bit(bit, &port, 8) {
+ /* scp3 ports start from 11 */
+ port_num = bit + 10;
+ sdw_handle_port_interrupt(slave,
+ port_num,
+ &port_status[port_num]);
+ }
+ }
+
+ /* Update the Slave driver */
+ if (slave_notify && (slave->ops) &&
+ (slave->ops->interrupt_callback)) {
+ slave_intr.control_port = clear;
+ memcpy(slave_intr.port, &port_status,
+ sizeof(slave_intr.port));
+
+ slave->ops->interrupt_callback(slave, &slave_intr);
+ }
+
+ /* Ack interrupt */
+ ret = sdw_write(slave, SDW_SCP_INT1, clear);
+ if (ret < 0) {
+ dev_err(slave->bus->dev,
+ "SDW_SCP_INT1 write failed:%d", ret);
+ return ret;
+ }
+
+ /*
+ * Read status again to ensure no new interrupts arrived
+ * while servicing interrupts.
+ */
+ _buf = ret = sdw_read(slave, SDW_SCP_INT1);
+ if (ret < 0) {
+ dev_err(slave->bus->dev,
+ "SDW_SCP_INT1 read failed:%d", ret);
+ return ret;
+ }
+
+ ret = sdw_nread(slave, SDW_SCP_INTSTAT2, 2, _buf2);
+ if (ret < 0) {
+ dev_err(slave->bus->dev,
+ "SDW_SCP_INT2/3 read failed:%d", ret);
+ return ret;
+ }
+
+ /* Make sure no interrupts are pending */
+ buf &= _buf;
+ buf2[0] &= _buf2[0];
+ buf2[1] &= _buf2[1];
+ stat = buf || buf2[0] || buf2[1];
+
+ /*
+ * Exit loop if Slave is continuously in ALERT state even
+ * after servicing the interrupt multiple times.
+ */
+ count++;
+
+ /* we can get alerts while processing so keep retrying */
+ } while (stat != 0 && count < SDW_READ_INTR_CLEAR_RETRY);
+
+ if (count == SDW_READ_INTR_CLEAR_RETRY)
+ dev_warn(slave->bus->dev, "Reached MAX_RETRY on alert read");
+
+ return ret;
+}
+
+static int sdw_update_slave_status(struct sdw_slave *slave,
+ enum sdw_slave_status status)
+{
+ if ((slave->ops) && (slave->ops->update_status))
+ return slave->ops->update_status(slave, status);
+
+ return 0;
+}
+
+/**
+ * sdw_handle_slave_status() - Handle Slave status
+ * @bus: SDW bus instance
+ * @status: Status for all Slave(s)
+ */
+int sdw_handle_slave_status(struct sdw_bus *bus,
+ enum sdw_slave_status status[])
+{
+ enum sdw_slave_status prev_status;
+ struct sdw_slave *slave;
+ int i, ret = 0;
+
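+ /*
+ * A status change on device number 0 (the enumeration address)
+ * means an unenumerated Slave has attached and needs a device
+ * number programmed.
+ */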
+ if (status[0] == SDW_SLAVE_ATTACHED) {
+ ret = sdw_program_device_num(bus);
+ if (ret)
+ dev_err(bus->dev, "Slave attach failed: %d", ret);
+ }
+
+ /* Continue to check other slave statuses */
+ for (i = 1; i <= SDW_MAX_DEVICES; i++) {
+ mutex_lock(&bus->bus_lock);
+ if (!test_bit(i, bus->assigned)) {
+ mutex_unlock(&bus->bus_lock);
+ continue;
+ }
+ mutex_unlock(&bus->bus_lock);
+
+ slave = sdw_get_slave(bus, i);
+ if (!slave)
+ continue;
+
+ switch (status[i]) {
+ case SDW_SLAVE_UNATTACHED:
+ if (slave->status == SDW_SLAVE_UNATTACHED)
+ break;
+
+ sdw_modify_slave_status(slave, SDW_SLAVE_UNATTACHED);
+ break;
+
+ case SDW_SLAVE_ALERT:
+ ret = sdw_handle_slave_alerts(slave);
+ if (ret)
+ dev_err(bus->dev,
+ "Slave %d alert handling failed: %d",
+ i, ret);
+ break;
+
+ case SDW_SLAVE_ATTACHED:
+ if (slave->status == SDW_SLAVE_ATTACHED)
+ break;
+
+ prev_status = slave->status;
+ sdw_modify_slave_status(slave, SDW_SLAVE_ATTACHED);
+
+ if (prev_status == SDW_SLAVE_ALERT)
+ break;
+
+ ret = sdw_initialize_slave(slave);
+ if (ret)
+ dev_err(bus->dev,
+ "Slave %d initialization failed: %d",
+ i, ret);
+
+ break;
+
+ default:
+ dev_err(bus->dev, "Invalid slave %d status:%d",
+ i, status[i]);
+ break;
+ }
+
+ ret = sdw_update_slave_status(slave, status[i]);
+ if (ret)
+ dev_err(slave->bus->dev,
+ "Update Slave status failed:%d", ret);
+
+
+ return ret;
+}
+EXPORT_SYMBOL(sdw_handle_slave_status);
diff --git a/drivers/soundwire/bus.h b/drivers/soundwire/bus.h
new file mode 100644
index 000000000000..345c34d697e9
--- /dev/null
+++ b/drivers/soundwire/bus.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+/* Copyright(c) 2015-17 Intel Corporation. */
+
+#ifndef __SDW_BUS_H
+#define __SDW_BUS_H
+
+#if IS_ENABLED(CONFIG_ACPI)
+int sdw_acpi_find_slaves(struct sdw_bus *bus);
+#else
+static inline int sdw_acpi_find_slaves(struct sdw_bus *bus)
+{
+ return -ENOTSUPP;
+}
+#endif
+
+void sdw_extract_slave_id(struct sdw_bus *bus,
+ u64 addr, struct sdw_slave_id *id);
+
+enum {
+ SDW_MSG_FLAG_READ = 0,
+ SDW_MSG_FLAG_WRITE,
+};
+
+/**
+ * struct sdw_msg - Message structure
+ * @addr: Register address accessed in the Slave
+ * @len: number of consecutive registers to transfer
+ * @dev_num: Slave device number
+ * @addr_page1: SCP address page 1 Slave register
+ * @addr_page2: SCP address page 2 Slave register
+ * @flags: transfer flags, indicate if xfer is read or write
+ * @buf: message data buffer
+ * @ssp_sync: Send message at SSP (Stream Synchronization Point)
+ * @page: address requires paging
+ */
+struct sdw_msg {
+ u16 addr;
+ u16 len;
+ u8 dev_num;
+ u8 addr_page1;
+ u8 addr_page2;
+ u8 flags;
+ u8 *buf;
+ bool ssp_sync;
+ bool page;
+};
+
+int sdw_transfer(struct sdw_bus *bus, struct sdw_msg *msg);
+int sdw_transfer_defer(struct sdw_bus *bus, struct sdw_msg *msg,
+ struct sdw_defer *defer);
+
+#define SDW_READ_INTR_CLEAR_RETRY 10
+
+int sdw_fill_msg(struct sdw_msg *msg, struct sdw_slave *slave,
+ u32 addr, size_t count, u16 dev_num, u8 flags, u8 *buf);
+
+/*
+ * Read-Modify-Write a Slave register: @val must already be positioned
+ * within @mask, since it is OR-ed in unshifted.
+ */
+static inline int
+sdw_update(struct sdw_slave *slave, u32 addr, u8 mask, u8 val)
+{
+ int tmp;
+
+ tmp = sdw_read(slave, addr);
+ if (tmp < 0)
+ return tmp;
+
+ tmp = (tmp & ~mask) | val;
+ return sdw_write(slave, addr, tmp);
+}
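+
+/*
+ * Illustrative use, mirroring sdw_initialize_slave(): enable bits in an
+ * interrupt mask register without disturbing the other bits:
+ *
+ *	ret = sdw_update(slave, SDW_SCP_INTMASK1, mask, mask);
+ */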
+
+#endif /* __SDW_BUS_H */
diff --git a/drivers/soundwire/bus_type.c b/drivers/soundwire/bus_type.c
new file mode 100644
index 000000000000..d5f3a70c06b0
--- /dev/null
+++ b/drivers/soundwire/bus_type.c
@@ -0,0 +1,193 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright(c) 2015-17 Intel Corporation.
+
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/pm_domain.h>
+#include <linux/soundwire/sdw.h>
+#include <linux/soundwire/sdw_type.h>
+
+/**
+ * sdw_get_device_id - find the matching SoundWire device id
+ * @slave: SoundWire Slave Device
+ * @drv: SoundWire Slave Driver
+ *
+ * The match is done by comparing the mfg_id and part_id from the
+ * struct sdw_device_id.
+ */
+static const struct sdw_device_id *
+sdw_get_device_id(struct sdw_slave *slave, struct sdw_driver *drv)
+{
+ const struct sdw_device_id *id = drv->id_table;
+
+ while (id && id->mfg_id) {
+ if (slave->id.mfg_id == id->mfg_id &&
+ slave->id.part_id == id->part_id)
+ return id;
+ id++;
+ }
+
+ return NULL;
+}
+
+static int sdw_bus_match(struct device *dev, struct device_driver *ddrv)
+{
+ struct sdw_slave *slave = dev_to_sdw_dev(dev);
+ struct sdw_driver *drv = drv_to_sdw_driver(ddrv);
+
+ return !!sdw_get_device_id(slave, drv);
+}
+
+int sdw_slave_modalias(const struct sdw_slave *slave, char *buf, size_t size)
+{
+ /* modalias is sdw:m<mfg_id>p<part_id> */
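+ /* e.g. mfg_id 0x025d, part_id 0x700 yields "sdw:m025Dp0700" */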
+
+ return snprintf(buf, size, "sdw:m%04Xp%04X\n",
+ slave->id.mfg_id, slave->id.part_id);
+}
+
+static int sdw_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ struct sdw_slave *slave = dev_to_sdw_dev(dev);
+ char modalias[32];
+
+ sdw_slave_modalias(slave, modalias, sizeof(modalias));
+
+ if (add_uevent_var(env, "MODALIAS=%s", modalias))
+ return -ENOMEM;
+
+ return 0;
+}
+
+struct bus_type sdw_bus_type = {
+ .name = "soundwire",
+ .match = sdw_bus_match,
+ .uevent = sdw_uevent,
+};
+EXPORT_SYMBOL_GPL(sdw_bus_type);
+
+static int sdw_drv_probe(struct device *dev)
+{
+ struct sdw_slave *slave = dev_to_sdw_dev(dev);
+ struct sdw_driver *drv = drv_to_sdw_driver(dev->driver);
+ const struct sdw_device_id *id;
+ int ret;
+
+ id = sdw_get_device_id(slave, drv);
+ if (!id)
+ return -ENODEV;
+
+ slave->ops = drv->ops;
+
+ /*
+ * attach to power domain but don't turn on (last arg)
+ */
+ ret = dev_pm_domain_attach(dev, false);
+ if (ret != -EPROBE_DEFER) {
+ ret = drv->probe(slave, id);
+ if (ret) {
+ dev_err(dev, "Probe of %s failed: %d\n", drv->name, ret);
+ dev_pm_domain_detach(dev, false);
+ }
+ }
+
+ if (ret)
+ return ret;
+
+ /* device is probed so let's read the properties now */
+ if (slave->ops && slave->ops->read_prop)
+ slave->ops->read_prop(slave);
+
+ /*
+ * Check for valid clk_stop_timeout, use DisCo worst case value of
+ * 300ms
+ *
+ * TODO: check the timeouts and driver removal case
+ */
+ if (slave->prop.clk_stop_timeout == 0)
+ slave->prop.clk_stop_timeout = 300;
+
+ slave->bus->clk_stop_timeout = max_t(u32, slave->bus->clk_stop_timeout,
+ slave->prop.clk_stop_timeout);
+
+ return 0;
+}
+
+static int sdw_drv_remove(struct device *dev)
+{
+ struct sdw_slave *slave = dev_to_sdw_dev(dev);
+ struct sdw_driver *drv = drv_to_sdw_driver(dev->driver);
+ int ret = 0;
+
+ if (drv->remove)
+ ret = drv->remove(slave);
+
+ dev_pm_domain_detach(dev, false);
+
+ return ret;
+}
+
+static void sdw_drv_shutdown(struct device *dev)
+{
+ struct sdw_slave *slave = dev_to_sdw_dev(dev);
+ struct sdw_driver *drv = drv_to_sdw_driver(dev->driver);
+
+ if (drv->shutdown)
+ drv->shutdown(slave);
+}
+
+/**
+ * __sdw_register_driver() - register a SoundWire Slave driver
+ * @drv: driver to register
+ * @owner: owning module/driver
+ *
+ * Return: zero on success, else a negative error code.
+ */
+int __sdw_register_driver(struct sdw_driver *drv, struct module *owner)
+{
+ drv->driver.bus = &sdw_bus_type;
+
+ if (!drv->probe) {
+ pr_err("driver %s didn't provide SDW probe routine\n",
+ drv->name);
+ return -EINVAL;
+ }
+
+ drv->driver.owner = owner;
+ drv->driver.probe = sdw_drv_probe;
+
+ if (drv->remove)
+ drv->driver.remove = sdw_drv_remove;
+
+ if (drv->shutdown)
+ drv->driver.shutdown = sdw_drv_shutdown;
+
+ return driver_register(&drv->driver);
+}
+EXPORT_SYMBOL_GPL(__sdw_register_driver);
+
+/**
+ * sdw_unregister_driver() - unregisters the SoundWire Slave driver
+ * @drv: driver to unregister
+ */
+void sdw_unregister_driver(struct sdw_driver *drv)
+{
+ driver_unregister(&drv->driver);
+}
+EXPORT_SYMBOL_GPL(sdw_unregister_driver);
+
+static int __init sdw_bus_init(void)
+{
+ return bus_register(&sdw_bus_type);
+}
+
+static void __exit sdw_bus_exit(void)
+{
+ bus_unregister(&sdw_bus_type);
+}
+
+postcore_initcall(sdw_bus_init);
+module_exit(sdw_bus_exit);
+
+MODULE_DESCRIPTION("SoundWire bus");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soundwire/cadence_master.c b/drivers/soundwire/cadence_master.c
new file mode 100644
index 000000000000..3a9b1462039b
--- /dev/null
+++ b/drivers/soundwire/cadence_master.c
@@ -0,0 +1,751 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+// Copyright(c) 2015-17 Intel Corporation.
+
+/*
+ * Cadence SoundWire Master module
+ * Used by Master driver
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/soundwire/sdw_registers.h>
+#include <linux/soundwire/sdw.h>
+#include "bus.h"
+#include "cadence_master.h"
+
+#define CDNS_MCP_CONFIG 0x0
+
+#define CDNS_MCP_CONFIG_MCMD_RETRY GENMASK(27, 24)
+#define CDNS_MCP_CONFIG_MPREQ_DELAY GENMASK(20, 16)
+#define CDNS_MCP_CONFIG_MMASTER BIT(7)
+#define CDNS_MCP_CONFIG_BUS_REL BIT(6)
+#define CDNS_MCP_CONFIG_SNIFFER BIT(5)
+#define CDNS_MCP_CONFIG_SSPMOD BIT(4)
+#define CDNS_MCP_CONFIG_CMD BIT(3)
+#define CDNS_MCP_CONFIG_OP GENMASK(2, 0)
+#define CDNS_MCP_CONFIG_OP_NORMAL 0
+
+#define CDNS_MCP_CONTROL 0x4
+
+#define CDNS_MCP_CONTROL_RST_DELAY GENMASK(10, 8)
+#define CDNS_MCP_CONTROL_CMD_RST BIT(7)
+#define CDNS_MCP_CONTROL_SOFT_RST BIT(6)
+#define CDNS_MCP_CONTROL_SW_RST BIT(5)
+#define CDNS_MCP_CONTROL_HW_RST BIT(4)
+#define CDNS_MCP_CONTROL_CLK_PAUSE BIT(3)
+#define CDNS_MCP_CONTROL_CLK_STOP_CLR BIT(2)
+#define CDNS_MCP_CONTROL_CMD_ACCEPT BIT(1)
+#define CDNS_MCP_CONTROL_BLOCK_WAKEUP BIT(0)
+
+#define CDNS_MCP_CMDCTRL 0x8
+#define CDNS_MCP_SSPSTAT 0xC
+#define CDNS_MCP_FRAME_SHAPE 0x10
+#define CDNS_MCP_FRAME_SHAPE_INIT 0x14
+
+#define CDNS_MCP_CONFIG_UPDATE 0x18
+#define CDNS_MCP_CONFIG_UPDATE_BIT BIT(0)
+
+#define CDNS_MCP_PHYCTRL 0x1C
+#define CDNS_MCP_SSP_CTRL0 0x20
+#define CDNS_MCP_SSP_CTRL1 0x28
+#define CDNS_MCP_CLK_CTRL0 0x30
+#define CDNS_MCP_CLK_CTRL1 0x38
+
+#define CDNS_MCP_STAT 0x40
+
+#define CDNS_MCP_STAT_ACTIVE_BANK BIT(20)
+#define CDNS_MCP_STAT_CLK_STOP BIT(16)
+
+#define CDNS_MCP_INTSTAT 0x44
+#define CDNS_MCP_INTMASK 0x48
+
+#define CDNS_MCP_INT_IRQ BIT(31)
+#define CDNS_MCP_INT_WAKEUP BIT(16)
+#define CDNS_MCP_INT_SLAVE_RSVD BIT(15)
+#define CDNS_MCP_INT_SLAVE_ALERT BIT(14)
+#define CDNS_MCP_INT_SLAVE_ATTACH BIT(13)
+#define CDNS_MCP_INT_SLAVE_NATTACH BIT(12)
+#define CDNS_MCP_INT_SLAVE_MASK GENMASK(15, 12)
+#define CDNS_MCP_INT_DPINT BIT(11)
+#define CDNS_MCP_INT_CTRL_CLASH BIT(10)
+#define CDNS_MCP_INT_DATA_CLASH BIT(9)
+#define CDNS_MCP_INT_CMD_ERR BIT(7)
+#define CDNS_MCP_INT_RX_WL BIT(2)
+#define CDNS_MCP_INT_TXE BIT(1)
+
+#define CDNS_MCP_INTSET 0x4C
+
+#define CDNS_SDW_SLAVE_STAT 0x50
+#define CDNS_MCP_SLAVE_STAT_MASK GENMASK(1, 0)
+
+#define CDNS_MCP_SLAVE_INTSTAT0 0x54
+#define CDNS_MCP_SLAVE_INTSTAT1 0x58
+#define CDNS_MCP_SLAVE_INTSTAT_NPRESENT BIT(0)
+#define CDNS_MCP_SLAVE_INTSTAT_ATTACHED BIT(1)
+#define CDNS_MCP_SLAVE_INTSTAT_ALERT BIT(2)
+#define CDNS_MCP_SLAVE_INTSTAT_RESERVED BIT(3)
+#define CDNS_MCP_SLAVE_STATUS_BITS GENMASK(3, 0)
+#define CDNS_MCP_SLAVE_STATUS_NUM 4
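+/* each Slave's status occupies CDNS_MCP_SLAVE_STATUS_NUM bits of INTSTAT0/1 */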
+
+#define CDNS_MCP_SLAVE_INTMASK0 0x5C
+#define CDNS_MCP_SLAVE_INTMASK1 0x60
+
+#define CDNS_MCP_SLAVE_INTMASK0_MASK GENMASK(30, 0)
+#define CDNS_MCP_SLAVE_INTMASK1_MASK GENMASK(16, 0)
+
+#define CDNS_MCP_PORT_INTSTAT 0x64
+#define CDNS_MCP_PDI_STAT 0x6C
+
+#define CDNS_MCP_FIFOLEVEL 0x78
+#define CDNS_MCP_FIFOSTAT 0x7C
+#define CDNS_MCP_RX_FIFO_AVAIL GENMASK(5, 0)
+
+#define CDNS_MCP_CMD_BASE 0x80
+#define CDNS_MCP_RESP_BASE 0x80
+#define CDNS_MCP_CMD_LEN 0x20
+#define CDNS_MCP_CMD_WORD_LEN 0x4
+
+#define CDNS_MCP_CMD_SSP_TAG BIT(31)
+#define CDNS_MCP_CMD_COMMAND GENMASK(30, 28)
+#define CDNS_MCP_CMD_DEV_ADDR GENMASK(27, 24)
+#define CDNS_MCP_CMD_REG_ADDR_H GENMASK(23, 16)
+#define CDNS_MCP_CMD_REG_ADDR_L GENMASK(15, 8)
+#define CDNS_MCP_CMD_REG_DATA GENMASK(7, 0)
+
+#define CDNS_MCP_CMD_READ 2
+#define CDNS_MCP_CMD_WRITE 3
+
+#define CDNS_MCP_RESP_RDATA GENMASK(15, 8)
+#define CDNS_MCP_RESP_ACK BIT(0)
+#define CDNS_MCP_RESP_NACK BIT(1)
+
+#define CDNS_DP_SIZE 128
+
+#define CDNS_DPN_B0_CONFIG(n) (0x100 + CDNS_DP_SIZE * (n))
+#define CDNS_DPN_B0_CH_EN(n) (0x104 + CDNS_DP_SIZE * (n))
+#define CDNS_DPN_B0_SAMPLE_CTRL(n) (0x108 + CDNS_DP_SIZE * (n))
+#define CDNS_DPN_B0_OFFSET_CTRL(n) (0x10C + CDNS_DP_SIZE * (n))
+#define CDNS_DPN_B0_HCTRL(n) (0x110 + CDNS_DP_SIZE * (n))
+#define CDNS_DPN_B0_ASYNC_CTRL(n) (0x114 + CDNS_DP_SIZE * (n))
+
+#define CDNS_DPN_B1_CONFIG(n) (0x118 + CDNS_DP_SIZE * (n))
+#define CDNS_DPN_B1_CH_EN(n) (0x11C + CDNS_DP_SIZE * (n))
+#define CDNS_DPN_B1_SAMPLE_CTRL(n) (0x120 + CDNS_DP_SIZE * (n))
+#define CDNS_DPN_B1_OFFSET_CTRL(n) (0x124 + CDNS_DP_SIZE * (n))
+#define CDNS_DPN_B1_HCTRL(n) (0x128 + CDNS_DP_SIZE * (n))
+#define CDNS_DPN_B1_ASYNC_CTRL(n) (0x12C + CDNS_DP_SIZE * (n))
+
+#define CDNS_DPN_CONFIG_BPM BIT(18)
+#define CDNS_DPN_CONFIG_BGC GENMASK(17, 16)
+#define CDNS_DPN_CONFIG_WL GENMASK(12, 8)
+#define CDNS_DPN_CONFIG_PORT_DAT GENMASK(3, 2)
+#define CDNS_DPN_CONFIG_PORT_FLOW GENMASK(1, 0)
+
+#define CDNS_DPN_SAMPLE_CTRL_SI GENMASK(15, 0)
+
+#define CDNS_DPN_OFFSET_CTRL_1 GENMASK(7, 0)
+#define CDNS_DPN_OFFSET_CTRL_2 GENMASK(15, 8)
+
+#define CDNS_DPN_HCTRL_HSTOP GENMASK(3, 0)
+#define CDNS_DPN_HCTRL_HSTART GENMASK(7, 4)
+#define CDNS_DPN_HCTRL_LCTRL GENMASK(10, 8)
+
+#define CDNS_PORTCTRL 0x130
+#define CDNS_PORTCTRL_DIRN BIT(7)
+#define CDNS_PORTCTRL_BANK_INVERT BIT(8)
+
+#define CDNS_PORT_OFFSET 0x80
+
+#define CDNS_PDI_CONFIG(n) (0x1100 + (n) * 16)
+
+#define CDNS_PDI_CONFIG_SOFT_RESET BIT(24)
+#define CDNS_PDI_CONFIG_CHANNEL GENMASK(15, 8)
+#define CDNS_PDI_CONFIG_PORT GENMASK(4, 0)
+
+/* Driver defaults */
+
+#define CDNS_DEFAULT_CLK_DIVIDER 0
+#define CDNS_DEFAULT_FRAME_SHAPE 0x30
+#define CDNS_DEFAULT_SSP_INTERVAL 0x18
+#define CDNS_TX_TIMEOUT 2000
+
+#define CDNS_PCM_PDI_OFFSET 0x2
+#define CDNS_PDM_PDI_OFFSET 0x6
+
+#define CDNS_SCP_RX_FIFOLEVEL 0x2
+
+/*
+ * register accessor helpers
+ */
+static inline u32 cdns_readl(struct sdw_cdns *cdns, int offset)
+{
+ return readl(cdns->registers + offset);
+}
+
+static inline void cdns_writel(struct sdw_cdns *cdns, int offset, u32 value)
+{
+ writel(value, cdns->registers + offset);
+}
+
+static inline void cdns_updatel(struct sdw_cdns *cdns,
+ int offset, u32 mask, u32 val)
+{
+ u32 tmp;
+
+ tmp = cdns_readl(cdns, offset);
+ tmp = (tmp & ~mask) | val;
+ cdns_writel(cdns, offset, tmp);
+}
+
+static int cdns_clear_bit(struct sdw_cdns *cdns, int offset, u32 value)
+{
+ int timeout = 10;
+ u32 reg_read;
+
+ writel(value, cdns->registers + offset);
+
+ /* Wait for bit to be self cleared */
+ do {
+ reg_read = readl(cdns->registers + offset);
+ if ((reg_read & value) == 0)
+ return 0;
+
+ timeout--;
+ udelay(50);
+ } while (timeout != 0);
+
+ return -EAGAIN;
+}
+
+/*
+ * IO Calls
+ */
+static enum sdw_command_response cdns_fill_msg_resp(
+ struct sdw_cdns *cdns,
+ struct sdw_msg *msg, int count, int offset)
+{
+ int nack = 0, no_ack = 0;
+ int i;
+
+ /* check message response */
+ for (i = 0; i < count; i++) {
+ if (!(cdns->response_buf[i] & CDNS_MCP_RESP_ACK)) {
+ no_ack = 1;
+ dev_dbg(cdns->dev, "Msg Ack not received\n");
+ if (cdns->response_buf[i] & CDNS_MCP_RESP_NACK) {
+ nack = 1;
+ dev_err(cdns->dev, "Msg NACK received\n");
+ }
+ }
+ }
+
+ if (nack) {
+ dev_err(cdns->dev, "Msg NACKed for Slave %d\n", msg->dev_num);
+ return SDW_CMD_FAIL;
+ } else if (no_ack) {
+ dev_dbg(cdns->dev, "Msg ignored for Slave %d\n", msg->dev_num);
+ return SDW_CMD_IGNORED;
+ }
+
+ /* fill response */
+ for (i = 0; i < count; i++)
+ msg->buf[i + offset] = cdns->response_buf[i] >>
+ SDW_REG_SHIFT(CDNS_MCP_RESP_RDATA);
+
+ return SDW_CMD_OK;
+}
+
+static enum sdw_command_response
+_cdns_xfer_msg(struct sdw_cdns *cdns, struct sdw_msg *msg, int cmd,
+ int offset, int count, bool defer)
+{
+ unsigned long time;
+ u32 base, i, data;
+ u16 addr;
+
+ /* Program the watermark level for RX FIFO */
+ if (cdns->msg_count != count) {
+ cdns_writel(cdns, CDNS_MCP_FIFOLEVEL, count);
+ cdns->msg_count = count;
+ }
+
+ base = CDNS_MCP_CMD_BASE;
+ addr = msg->addr;
+
+ for (i = 0; i < count; i++) {
+ data = msg->dev_num << SDW_REG_SHIFT(CDNS_MCP_CMD_DEV_ADDR);
+ data |= cmd << SDW_REG_SHIFT(CDNS_MCP_CMD_COMMAND);
+ data |= addr++ << SDW_REG_SHIFT(CDNS_MCP_CMD_REG_ADDR_L);
+
+ if (msg->flags == SDW_MSG_FLAG_WRITE)
+ data |= msg->buf[i + offset];
+
+ data |= msg->ssp_sync << SDW_REG_SHIFT(CDNS_MCP_CMD_SSP_TAG);
+ cdns_writel(cdns, base, data);
+ base += CDNS_MCP_CMD_WORD_LEN;
+ }
+
+ if (defer)
+ return SDW_CMD_OK;
+
+ /* wait for timeout or response */
+ time = wait_for_completion_timeout(&cdns->tx_complete,
+ msecs_to_jiffies(CDNS_TX_TIMEOUT));
+ if (!time) {
+ dev_err(cdns->dev, "IO transfer timed out\n");
+ msg->len = 0;
+ return SDW_CMD_TIMEOUT;
+ }
+
+ return cdns_fill_msg_resp(cdns, msg, count, offset);
+}
+
+static enum sdw_command_response cdns_program_scp_addr(
+ struct sdw_cdns *cdns, struct sdw_msg *msg)
+{
+ int nack = 0, no_ack = 0;
+ unsigned long time;
+ u32 data[2], base;
+ int i;
+
+ /* Program the watermark level for RX FIFO */
+ if (cdns->msg_count != CDNS_SCP_RX_FIFOLEVEL) {
+ cdns_writel(cdns, CDNS_MCP_FIFOLEVEL, CDNS_SCP_RX_FIFOLEVEL);
+ cdns->msg_count = CDNS_SCP_RX_FIFOLEVEL;
+ }
+
+ data[0] = msg->dev_num << SDW_REG_SHIFT(CDNS_MCP_CMD_DEV_ADDR);
+ data[0] |= 0x3 << SDW_REG_SHIFT(CDNS_MCP_CMD_COMMAND);
+ data[1] = data[0];
+
+ data[0] |= SDW_SCP_ADDRPAGE1 << SDW_REG_SHIFT(CDNS_MCP_CMD_REG_ADDR_L);
+ data[1] |= SDW_SCP_ADDRPAGE2 << SDW_REG_SHIFT(CDNS_MCP_CMD_REG_ADDR_L);
+
+ data[0] |= msg->addr_page1;
+ data[1] |= msg->addr_page2;
+
+ base = CDNS_MCP_CMD_BASE;
+ cdns_writel(cdns, base, data[0]);
+ base += CDNS_MCP_CMD_WORD_LEN;
+ cdns_writel(cdns, base, data[1]);
+
+ time = wait_for_completion_timeout(&cdns->tx_complete,
+ msecs_to_jiffies(CDNS_TX_TIMEOUT));
+ if (!time) {
+ dev_err(cdns->dev, "SCP Msg trf timed out\n");
+ msg->len = 0;
+ return SDW_CMD_TIMEOUT;
+ }
+
+ /* check the responses to the writes */
+ for (i = 0; i < 2; i++) {
+ if (!(cdns->response_buf[i] & CDNS_MCP_RESP_ACK)) {
+ no_ack = 1;
+ dev_err(cdns->dev, "Program SCP Ack not received");
+ if (cdns->response_buf[i] & CDNS_MCP_RESP_NACK) {
+ nack = 1;
+ dev_err(cdns->dev, "Program SCP NACK received");
+ }
+ }
+ }
+
+ /* For NACK, NO ack, don't return err if we are in Broadcast mode */
+ if (nack) {
+ dev_err(cdns->dev,
+ "SCP_addrpage NACKed for Slave %d", msg->dev_num);
+ return SDW_CMD_FAIL;
+ } else if (no_ack) {
+ dev_dbg(cdns->dev,
+ "SCP_addrpage ignored for Slave %d", msg->dev_num);
+ return SDW_CMD_IGNORED;
+ }
+
+ return SDW_CMD_OK;
+}
+
+static int cdns_prep_msg(struct sdw_cdns *cdns, struct sdw_msg *msg, int *cmd)
+{
+ int ret;
+
+ if (msg->page) {
+ ret = cdns_program_scp_addr(cdns, msg);
+ if (ret) {
+ msg->len = 0;
+ return ret;
+ }
+ }
+
+ switch (msg->flags) {
+ case SDW_MSG_FLAG_READ:
+ *cmd = CDNS_MCP_CMD_READ;
+ break;
+
+ case SDW_MSG_FLAG_WRITE:
+ *cmd = CDNS_MCP_CMD_WRITE;
+ break;
+
+ default:
+ dev_err(cdns->dev, "Invalid msg cmd: %d\n", msg->flags);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static enum sdw_command_response
+cdns_xfer_msg(struct sdw_bus *bus, struct sdw_msg *msg)
+{
+ struct sdw_cdns *cdns = bus_to_cdns(bus);
+ int cmd = 0, ret, i;
+
+ ret = cdns_prep_msg(cdns, msg, &cmd);
+ if (ret)
+ return SDW_CMD_FAIL_OTHER;
+
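+ /* issue the transfer in CDNS_MCP_CMD_LEN-register chunks */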
+ for (i = 0; i < msg->len / CDNS_MCP_CMD_LEN; i++) {
+ ret = _cdns_xfer_msg(cdns, msg, cmd, i * CDNS_MCP_CMD_LEN,
+ CDNS_MCP_CMD_LEN, false);
+ if (ret < 0)
+ goto exit;
+ }
+
+ if (!(msg->len % CDNS_MCP_CMD_LEN))
+ goto exit;
+
+ ret = _cdns_xfer_msg(cdns, msg, cmd, i * CDNS_MCP_CMD_LEN,
+ msg->len % CDNS_MCP_CMD_LEN, false);
+
+exit:
+ return ret;
+}
+
+static enum sdw_command_response
+cdns_xfer_msg_defer(struct sdw_bus *bus,
+ struct sdw_msg *msg, struct sdw_defer *defer)
+{
+ struct sdw_cdns *cdns = bus_to_cdns(bus);
+ int cmd = 0, ret;
+
+ /* only messages of length 1 are supported for deferred transfers */
+ if (msg->len > 1)
+ return -ENOTSUPP;
+
+ ret = cdns_prep_msg(cdns, msg, &cmd);
+ if (ret)
+ return SDW_CMD_FAIL_OTHER;
+
+ cdns->defer = defer;
+ cdns->defer->length = msg->len;
+
+ return _cdns_xfer_msg(cdns, msg, cmd, 0, msg->len, true);
+}
+
+static enum sdw_command_response
+cdns_reset_page_addr(struct sdw_bus *bus, unsigned int dev_num)
+{
+ struct sdw_cdns *cdns = bus_to_cdns(bus);
+ struct sdw_msg msg;
+
+ /* Create dummy message with valid device number */
+ memset(&msg, 0, sizeof(msg));
+ msg.dev_num = dev_num;
+
+ return cdns_program_scp_addr(cdns, &msg);
+}
+
+/*
+ * IRQ handling
+ */
+
+static void cdns_read_response(struct sdw_cdns *cdns)
+{
+ u32 num_resp, cmd_base;
+ int i;
+
+ num_resp = cdns_readl(cdns, CDNS_MCP_FIFOSTAT);
+ num_resp &= CDNS_MCP_RX_FIFO_AVAIL;
+
+ cmd_base = CDNS_MCP_CMD_BASE;
+
+ for (i = 0; i < num_resp; i++) {
+ cdns->response_buf[i] = cdns_readl(cdns, cmd_base);
+ cmd_base += CDNS_MCP_CMD_WORD_LEN;
+ }
+}
+
+static int cdns_update_slave_status(struct sdw_cdns *cdns,
+ u32 slave0, u32 slave1)
+{
+ enum sdw_slave_status status[SDW_MAX_DEVICES + 1];
+ bool is_slave = false;
+ u64 slave, mask;
+ int i, set_status;
+
+ /* combine the two status */
+ slave = ((u64)slave1 << 32) | slave0;
+ memset(status, 0, sizeof(status));
+
+ for (i = 0; i <= SDW_MAX_DEVICES; i++) {
+ mask = (slave >> (i * CDNS_MCP_SLAVE_STATUS_NUM)) &
+ CDNS_MCP_SLAVE_STATUS_BITS;
+ if (!mask)
+ continue;
+
+ is_slave = true;
+ set_status = 0;
+
+ if (mask & CDNS_MCP_SLAVE_INTSTAT_RESERVED) {
+ status[i] = SDW_SLAVE_RESERVED;
+ set_status++;
+ }
+
+ if (mask & CDNS_MCP_SLAVE_INTSTAT_ATTACHED) {
+ status[i] = SDW_SLAVE_ATTACHED;
+ set_status++;
+ }
+
+ if (mask & CDNS_MCP_SLAVE_INTSTAT_ALERT) {
+ status[i] = SDW_SLAVE_ALERT;
+ set_status++;
+ }
+
+ if (mask & CDNS_MCP_SLAVE_INTSTAT_NPRESENT) {
+ status[i] = SDW_SLAVE_UNATTACHED;
+ set_status++;
+ }
+
+ /* first check if Slave reported multiple status */
+ if (set_status > 1) {
+ dev_warn(cdns->dev,
+ "Slave reported multiple Status: %d\n",
+ status[i]);
+ /*
+ * TODO: we need to reread the status here by
+ * issuing a PING cmd
+ */
+ }
+ }
+
+ if (is_slave)
+ return sdw_handle_slave_status(&cdns->bus, status);
+
+ return 0;
+}
+
+/**
+ * sdw_cdns_irq() - Cadence interrupt handler
+ * @irq: irq number
+ * @dev_id: irq context
+ */
+irqreturn_t sdw_cdns_irq(int irq, void *dev_id)
+{
+ struct sdw_cdns *cdns = dev_id;
+ u32 int_status;
+ int ret = IRQ_HANDLED;
+
+ /* Check if the link is up */
+ if (!cdns->link_up)
+ return IRQ_NONE;
+
+ int_status = cdns_readl(cdns, CDNS_MCP_INTSTAT);
+
+ if (!(int_status & CDNS_MCP_INT_IRQ))
+ return IRQ_NONE;
+
+ if (int_status & CDNS_MCP_INT_RX_WL) {
+ cdns_read_response(cdns);
+
+ if (cdns->defer) {
+ cdns_fill_msg_resp(cdns, cdns->defer->msg,
+ cdns->defer->length, 0);
+ complete(&cdns->defer->complete);
+ cdns->defer = NULL;
+ } else {
+ complete(&cdns->tx_complete);
+ }
+ }
+
+ if (int_status & CDNS_MCP_INT_CTRL_CLASH) {
+ /* Slave is driving bit slot during control word */
+ dev_err_ratelimited(cdns->dev, "Bus clash for control word\n");
+ int_status |= CDNS_MCP_INT_CTRL_CLASH;
+ }
+
+ if (int_status & CDNS_MCP_INT_DATA_CLASH) {
+ /*
+ * Multiple slaves trying to drive bit slot, or issue with
+ * ownership of data bits or Slave gone bonkers
+ */
+ dev_err_ratelimited(cdns->dev, "Bus clash for data word\n");
+ int_status |= CDNS_MCP_INT_DATA_CLASH;
+ }
+
+ if (int_status & CDNS_MCP_INT_SLAVE_MASK) {
+ /* Mask the Slave interrupt and wake thread */
+ cdns_updatel(cdns, CDNS_MCP_INTMASK,
+ CDNS_MCP_INT_SLAVE_MASK, 0);
+
+ int_status &= ~CDNS_MCP_INT_SLAVE_MASK;
+ ret = IRQ_WAKE_THREAD;
+ }
+
+ cdns_writel(cdns, CDNS_MCP_INTSTAT, int_status);
+ return ret;
+}
+EXPORT_SYMBOL(sdw_cdns_irq);
+
+/**
+ * sdw_cdns_thread() - Cadence irq thread handler
+ * @irq: irq number
+ * @dev_id: irq context
+ */
+irqreturn_t sdw_cdns_thread(int irq, void *dev_id)
+{
+ struct sdw_cdns *cdns = dev_id;
+ u32 slave0, slave1;
+
+ dev_dbg(cdns->dev, "Slave status change\n");
+
+ slave0 = cdns_readl(cdns, CDNS_MCP_SLAVE_INTSTAT0);
+ slave1 = cdns_readl(cdns, CDNS_MCP_SLAVE_INTSTAT1);
+
+ cdns_update_slave_status(cdns, slave0, slave1);
+ cdns_writel(cdns, CDNS_MCP_SLAVE_INTSTAT0, slave0);
+ cdns_writel(cdns, CDNS_MCP_SLAVE_INTSTAT1, slave1);
+
+ /* clear and unmask Slave interrupt now */
+ cdns_writel(cdns, CDNS_MCP_INTSTAT, CDNS_MCP_INT_SLAVE_MASK);
+ cdns_updatel(cdns, CDNS_MCP_INTMASK,
+ CDNS_MCP_INT_SLAVE_MASK, CDNS_MCP_INT_SLAVE_MASK);
+
+ return IRQ_HANDLED;
+}
+EXPORT_SYMBOL(sdw_cdns_thread);
+
+/*
+ * init routines
+ */
+static int _cdns_enable_interrupt(struct sdw_cdns *cdns)
+{
+ u32 mask;
+
+ cdns_writel(cdns, CDNS_MCP_SLAVE_INTMASK0,
+ CDNS_MCP_SLAVE_INTMASK0_MASK);
+ cdns_writel(cdns, CDNS_MCP_SLAVE_INTMASK1,
+ CDNS_MCP_SLAVE_INTMASK1_MASK);
+
+ mask = CDNS_MCP_INT_SLAVE_RSVD | CDNS_MCP_INT_SLAVE_ALERT |
+ CDNS_MCP_INT_SLAVE_ATTACH | CDNS_MCP_INT_SLAVE_NATTACH |
+ CDNS_MCP_INT_CTRL_CLASH | CDNS_MCP_INT_DATA_CLASH |
+ CDNS_MCP_INT_RX_WL | CDNS_MCP_INT_IRQ | CDNS_MCP_INT_DPINT;
+
+ cdns_writel(cdns, CDNS_MCP_INTMASK, mask);
+
+ return 0;
+}
+
+/**
+ * sdw_cdns_enable_interrupt() - Enable SDW interrupts and update config
+ * @cdns: Cadence instance
+ */
+int sdw_cdns_enable_interrupt(struct sdw_cdns *cdns)
+{
+ int ret;
+
+ _cdns_enable_interrupt(cdns);
+ ret = cdns_clear_bit(cdns, CDNS_MCP_CONFIG_UPDATE,
+ CDNS_MCP_CONFIG_UPDATE_BIT);
+ if (ret < 0)
+ dev_err(cdns->dev, "Config update timedout");
+
+ return ret;
+}
+EXPORT_SYMBOL(sdw_cdns_enable_interrupt);
+
+/**
+ * sdw_cdns_init() - Cadence initialization
+ * @cdns: Cadence instance
+ */
+int sdw_cdns_init(struct sdw_cdns *cdns)
+{
+ u32 val;
+ int ret;
+
+ /* Exit clock stop */
+ ret = cdns_clear_bit(cdns, CDNS_MCP_CONTROL,
+ CDNS_MCP_CONTROL_CLK_STOP_CLR);
+ if (ret < 0) {
+ dev_err(cdns->dev, "Couldn't exit from clock stop\n");
+ return ret;
+ }
+
+ /* Set clock divider */
+ val = cdns_readl(cdns, CDNS_MCP_CLK_CTRL0);
+ val |= CDNS_DEFAULT_CLK_DIVIDER;
+ cdns_writel(cdns, CDNS_MCP_CLK_CTRL0, val);
+
+ /* Set the default frame shape */
+ cdns_writel(cdns, CDNS_MCP_FRAME_SHAPE_INIT, CDNS_DEFAULT_FRAME_SHAPE);
+
+ /* Set SSP interval to default value */
+ cdns_writel(cdns, CDNS_MCP_SSP_CTRL0, CDNS_DEFAULT_SSP_INTERVAL);
+ cdns_writel(cdns, CDNS_MCP_SSP_CTRL1, CDNS_DEFAULT_SSP_INTERVAL);
+
+ /* Set cmd accept mode */
+ cdns_updatel(cdns, CDNS_MCP_CONTROL, CDNS_MCP_CONTROL_CMD_ACCEPT,
+ CDNS_MCP_CONTROL_CMD_ACCEPT);
+
+ /* Configure mcp config */
+ val = cdns_readl(cdns, CDNS_MCP_CONFIG);
+
+ /* Set Max cmd retry to 15 */
+ val |= CDNS_MCP_CONFIG_MCMD_RETRY;
+
+ /* Set frame delay between PREQ and ping frame to 15 frames */
+ val |= 0xF << SDW_REG_SHIFT(CDNS_MCP_CONFIG_MPREQ_DELAY);
+
+ /* Disable auto bus release */
+ val &= ~CDNS_MCP_CONFIG_BUS_REL;
+
+ /* Disable sniffer mode */
+ val &= ~CDNS_MCP_CONFIG_SNIFFER;
+
+ /* Set cmd mode for Tx and Rx cmds */
+ val &= ~CDNS_MCP_CONFIG_CMD;
+
+ /* Set operation to normal */
+ val &= ~CDNS_MCP_CONFIG_OP;
+ val |= CDNS_MCP_CONFIG_OP_NORMAL;
+
+ cdns_writel(cdns, CDNS_MCP_CONFIG, val);
+
+ return 0;
+}
+EXPORT_SYMBOL(sdw_cdns_init);
+
+struct sdw_master_ops sdw_cdns_master_ops = {
+ .read_prop = sdw_master_read_prop,
+ .xfer_msg = cdns_xfer_msg,
+ .xfer_msg_defer = cdns_xfer_msg_defer,
+ .reset_page_addr = cdns_reset_page_addr,
+};
+EXPORT_SYMBOL(sdw_cdns_master_ops);
+
+/**
+ * sdw_cdns_probe() - Cadence probe routine
+ * @cdns: Cadence instance
+ */
+int sdw_cdns_probe(struct sdw_cdns *cdns)
+{
+ init_completion(&cdns->tx_complete);
+
+ return 0;
+}
+EXPORT_SYMBOL(sdw_cdns_probe);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("Cadence Soundwire Library");
diff --git a/drivers/soundwire/cadence_master.h b/drivers/soundwire/cadence_master.h
new file mode 100644
index 000000000000..beaf6c9804eb
--- /dev/null
+++ b/drivers/soundwire/cadence_master.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+/* Copyright(c) 2015-17 Intel Corporation. */
+
+#ifndef __SDW_CADENCE_H
+#define __SDW_CADENCE_H
+
+/**
+ * struct sdw_cdns - Cadence driver context
+ * @dev: Linux device
+ * @bus: Bus handle
+ * @instance: instance number
+ * @response_buf: SoundWire response buffer
+ * @tx_complete: Tx completion
+ * @defer: Pointer to the deferred transfer message context
+ * @registers: Cadence registers
+ * @link_up: Link status
+ * @msg_count: Messages sent on bus
+ */
+struct sdw_cdns {
+ struct device *dev;
+ struct sdw_bus bus;
+ unsigned int instance;
+
+ u32 response_buf[0x80];
+ struct completion tx_complete;
+ struct sdw_defer *defer;
+
+ void __iomem *registers;
+
+ bool link_up;
+ unsigned int msg_count;
+};
+
+#define bus_to_cdns(_bus) container_of(_bus, struct sdw_cdns, bus)
+
+/* Exported symbols */
+
+int sdw_cdns_probe(struct sdw_cdns *cdns);
+extern struct sdw_master_ops sdw_cdns_master_ops;
+
+irqreturn_t sdw_cdns_irq(int irq, void *dev_id);
+irqreturn_t sdw_cdns_thread(int irq, void *dev_id);
+
+int sdw_cdns_init(struct sdw_cdns *cdns);
+int sdw_cdns_enable_interrupt(struct sdw_cdns *cdns);
+
+#endif /* __SDW_CADENCE_H */
diff --git a/drivers/soundwire/intel.c b/drivers/soundwire/intel.c
new file mode 100644
index 000000000000..86a7bd1fc912
--- /dev/null
+++ b/drivers/soundwire/intel.c
@@ -0,0 +1,345 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+// Copyright(c) 2015-17 Intel Corporation.
+
+/*
+ * SoundWire Intel Master Driver
+ */
+
+#include <linux/acpi.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/soundwire/sdw_registers.h>
+#include <linux/soundwire/sdw.h>
+#include <linux/soundwire/sdw_intel.h>
+#include "cadence_master.h"
+#include "intel.h"
+
+/* Intel SHIM Registers Definition */
+#define SDW_SHIM_LCAP 0x0
+#define SDW_SHIM_LCTL 0x4
+#define SDW_SHIM_IPPTR 0x8
+#define SDW_SHIM_SYNC 0xC
+
+#define SDW_SHIM_CTLSCAP(x) (0x010 + 0x60 * x)
+#define SDW_SHIM_CTLS0CM(x) (0x012 + 0x60 * x)
+#define SDW_SHIM_CTLS1CM(x) (0x014 + 0x60 * x)
+#define SDW_SHIM_CTLS2CM(x) (0x016 + 0x60 * x)
+#define SDW_SHIM_CTLS3CM(x) (0x018 + 0x60 * x)
+#define SDW_SHIM_PCMSCAP(x) (0x020 + 0x60 * x)
+
+#define SDW_SHIM_PCMSYCHM(x, y) (0x022 + (0x60 * x) + (0x2 * y))
+#define SDW_SHIM_PCMSYCHC(x, y) (0x042 + (0x60 * x) + (0x2 * y))
+#define SDW_SHIM_PDMSCAP(x) (0x062 + 0x60 * x)
+#define SDW_SHIM_IOCTL(x) (0x06C + 0x60 * x)
+#define SDW_SHIM_CTMCTL(x) (0x06E + 0x60 * x)
+
+#define SDW_SHIM_WAKEEN 0x190
+#define SDW_SHIM_WAKESTS 0x192
+
+#define SDW_SHIM_LCTL_SPA BIT(0)
+#define SDW_SHIM_LCTL_CPA BIT(8)
+
+#define SDW_SHIM_SYNC_SYNCPRD_VAL 0x176F
+#define SDW_SHIM_SYNC_SYNCPRD GENMASK(14, 0)
+#define SDW_SHIM_SYNC_SYNCCPU BIT(15)
+#define SDW_SHIM_SYNC_CMDSYNC_MASK GENMASK(19, 16)
+#define SDW_SHIM_SYNC_CMDSYNC BIT(16)
+#define SDW_SHIM_SYNC_SYNCGO BIT(24)
+
+#define SDW_SHIM_PCMSCAP_ISS GENMASK(3, 0)
+#define SDW_SHIM_PCMSCAP_OSS GENMASK(7, 4)
+#define SDW_SHIM_PCMSCAP_BSS GENMASK(12, 8)
+
+#define SDW_SHIM_PCMSYCM_LCHN GENMASK(3, 0)
+#define SDW_SHIM_PCMSYCM_HCHN GENMASK(7, 4)
+#define SDW_SHIM_PCMSYCM_STREAM GENMASK(13, 8)
+#define SDW_SHIM_PCMSYCM_DIR BIT(15)
+
+#define SDW_SHIM_PDMSCAP_ISS GENMASK(3, 0)
+#define SDW_SHIM_PDMSCAP_OSS GENMASK(7, 4)
+#define SDW_SHIM_PDMSCAP_BSS GENMASK(12, 8)
+#define SDW_SHIM_PDMSCAP_CPSS GENMASK(15, 13)
+
+#define SDW_SHIM_IOCTL_MIF BIT(0)
+#define SDW_SHIM_IOCTL_CO BIT(1)
+#define SDW_SHIM_IOCTL_COE BIT(2)
+#define SDW_SHIM_IOCTL_DO BIT(3)
+#define SDW_SHIM_IOCTL_DOE BIT(4)
+#define SDW_SHIM_IOCTL_BKE BIT(5)
+#define SDW_SHIM_IOCTL_WPDD BIT(6)
+#define SDW_SHIM_IOCTL_CIBD BIT(8)
+#define SDW_SHIM_IOCTL_DIBD BIT(9)
+
+#define SDW_SHIM_CTMCTL_DACTQE BIT(0)
+#define SDW_SHIM_CTMCTL_DODS BIT(1)
+#define SDW_SHIM_CTMCTL_DOAIS GENMASK(4, 3)
+
+#define SDW_SHIM_WAKEEN_ENABLE BIT(0)
+#define SDW_SHIM_WAKESTS_STATUS BIT(0)
+
+/* Intel ALH Register definitions */
+#define SDW_ALH_STRMZCFG(x) (0x000 + (0x4 * x))
+
+#define SDW_ALH_STRMZCFG_DMAT_VAL 0x3
+#define SDW_ALH_STRMZCFG_DMAT GENMASK(7, 0)
+#define SDW_ALH_STRMZCFG_CHN GENMASK(19, 16)
+
+struct sdw_intel {
+ struct sdw_cdns cdns;
+ int instance;
+ struct sdw_intel_link_res *res;
+};
+
+#define cdns_to_intel(_cdns) container_of(_cdns, struct sdw_intel, cdns)
+
+/*
+ * Read, write helpers for HW registers
+ */
+static inline int intel_readl(void __iomem *base, int offset)
+{
+ return readl(base + offset);
+}
+
+static inline void intel_writel(void __iomem *base, int offset, int value)
+{
+ writel(value, base + offset);
+}
+
+static inline u16 intel_readw(void __iomem *base, int offset)
+{
+ return readw(base + offset);
+}
+
+static inline void intel_writew(void __iomem *base, int offset, u16 value)
+{
+ writew(value, base + offset);
+}
+
+static int intel_clear_bit(void __iomem *base, int offset, u32 value, u32 mask)
+{
+ int timeout = 10;
+ u32 reg_read;
+
+ writel(value, base + offset);
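+	/* Poll up to 10 times at 50us intervals for the masked bits to clear */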
+ do {
+ reg_read = readl(base + offset);
+ if (!(reg_read & mask))
+ return 0;
+
+ timeout--;
+ udelay(50);
+ } while (timeout != 0);
+
+ return -EAGAIN;
+}
+
+static int intel_set_bit(void __iomem *base, int offset, u32 value, u32 mask)
+{
+ int timeout = 10;
+ u32 reg_read;
+
+ writel(value, base + offset);
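+	/* Poll up to 10 times at 50us intervals for the masked bits to be set */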
+ do {
+ reg_read = readl(base + offset);
+ if (reg_read & mask)
+ return 0;
+
+ timeout--;
+ udelay(50);
+ } while (timeout != 0);
+
+ return -EAGAIN;
+}
+
+/*
+ * shim ops
+ */
+
+static int intel_link_power_up(struct sdw_intel *sdw)
+{
+ unsigned int link_id = sdw->instance;
+ void __iomem *shim = sdw->res->shim;
+ int spa_mask, cpa_mask;
+ int link_control, ret;
+
+ /* Link power up sequence */
+ link_control = intel_readl(shim, SDW_SHIM_LCTL);
+ spa_mask = (SDW_SHIM_LCTL_SPA << link_id);
+ cpa_mask = (SDW_SHIM_LCTL_CPA << link_id);
+ link_control |= spa_mask;
+
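+	/* Request power-up via the SPA bit and wait for the CPA ack bit */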
+ ret = intel_set_bit(shim, SDW_SHIM_LCTL, link_control, cpa_mask);
+ if (ret < 0)
+ return ret;
+
+ sdw->cdns.link_up = true;
+ return 0;
+}
+
+static int intel_shim_init(struct sdw_intel *sdw)
+{
+ void __iomem *shim = sdw->res->shim;
+ unsigned int link_id = sdw->instance;
+ int sync_reg, ret;
+ u16 ioctl = 0, act = 0;
+
+ /* Initialize Shim */
+ ioctl |= SDW_SHIM_IOCTL_BKE;
+ intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
+
+ ioctl |= SDW_SHIM_IOCTL_WPDD;
+ intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
+
+ ioctl |= SDW_SHIM_IOCTL_DO;
+ intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
+
+ ioctl |= SDW_SHIM_IOCTL_DOE;
+ intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
+
+ /* Switch to MIP from Glue logic */
+ ioctl = intel_readw(shim, SDW_SHIM_IOCTL(link_id));
+
+ ioctl &= ~(SDW_SHIM_IOCTL_DOE);
+ intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
+
+ ioctl &= ~(SDW_SHIM_IOCTL_DO);
+ intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
+
+ ioctl |= (SDW_SHIM_IOCTL_MIF);
+ intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
+
+ ioctl &= ~(SDW_SHIM_IOCTL_BKE);
+ ioctl &= ~(SDW_SHIM_IOCTL_COE);
+
+ intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
+
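+	/* Program CTMCTL: set DOAIS to 1 and enable DACTQE and DODS */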
+ act |= 0x1 << SDW_REG_SHIFT(SDW_SHIM_CTMCTL_DOAIS);
+ act |= SDW_SHIM_CTMCTL_DACTQE;
+ act |= SDW_SHIM_CTMCTL_DODS;
+ intel_writew(shim, SDW_SHIM_CTMCTL(link_id), act);
+
+ /* Now set SyncPRD period */
+ sync_reg = intel_readl(shim, SDW_SHIM_SYNC);
+ sync_reg |= (SDW_SHIM_SYNC_SYNCPRD_VAL <<
+ SDW_REG_SHIFT(SDW_SHIM_SYNC_SYNCPRD));
+
+ /* Set SyncCPU bit */
+ sync_reg |= SDW_SHIM_SYNC_SYNCCPU;
+ ret = intel_clear_bit(shim, SDW_SHIM_SYNC, sync_reg,
+ SDW_SHIM_SYNC_SYNCCPU);
+ if (ret < 0)
+ dev_err(sdw->cdns.dev, "Failed to set sync period: %d", ret);
+
+ return ret;
+}
+
+static int intel_prop_read(struct sdw_bus *bus)
+{
+ /* Initialize with default handler to read all DisCo properties */
+ sdw_master_read_prop(bus);
+
+	/* Some BIOS-provided values are incorrect, so override them here */
+ bus->prop.num_freq = 1;
+	bus->prop.freq = devm_kcalloc(bus->dev, bus->prop.num_freq,
+				sizeof(*bus->prop.freq), GFP_KERNEL);
+ if (!bus->prop.freq)
+ return -ENOMEM;
+
+ bus->prop.freq[0] = bus->prop.max_freq;
+ bus->prop.err_threshold = 5;
+
+ return 0;
+}
+
+/*
+ * probe and init
+ */
+static int intel_probe(struct platform_device *pdev)
+{
+ struct sdw_intel *sdw;
+ int ret;
+
+ sdw = devm_kzalloc(&pdev->dev, sizeof(*sdw), GFP_KERNEL);
+ if (!sdw)
+ return -ENOMEM;
+
+ sdw->instance = pdev->id;
+ sdw->res = dev_get_platdata(&pdev->dev);
+ sdw->cdns.dev = &pdev->dev;
+ sdw->cdns.registers = sdw->res->registers;
+ sdw->cdns.instance = sdw->instance;
+ sdw->cdns.msg_count = 0;
+ sdw->cdns.bus.dev = &pdev->dev;
+ sdw->cdns.bus.link_id = pdev->id;
+
+ sdw_cdns_probe(&sdw->cdns);
+
+	/* Set the Intel-specific property read op (overrides the shared ops struct) */
+ sdw_cdns_master_ops.read_prop = intel_prop_read;
+ sdw->cdns.bus.ops = &sdw_cdns_master_ops;
+
+ platform_set_drvdata(pdev, sdw);
+
+ ret = sdw_add_bus_master(&sdw->cdns.bus);
+ if (ret) {
+ dev_err(&pdev->dev, "sdw_add_bus_master fail: %d\n", ret);
+ goto err_master_reg;
+ }
+
+ /* Initialize shim and controller */
+ intel_link_power_up(sdw);
+ intel_shim_init(sdw);
+
+ ret = sdw_cdns_init(&sdw->cdns);
+ if (ret)
+ goto err_init;
+
+ ret = sdw_cdns_enable_interrupt(&sdw->cdns);
+ if (ret)
+ goto err_init;
+
+ /* Acquire IRQ */
+ ret = request_threaded_irq(sdw->res->irq, sdw_cdns_irq,
+ sdw_cdns_thread, IRQF_SHARED, KBUILD_MODNAME,
+ &sdw->cdns);
+ if (ret < 0) {
+ dev_err(sdw->cdns.dev, "unable to grab IRQ %d, disabling device\n",
+ sdw->res->irq);
+ goto err_init;
+ }
+
+ return 0;
+
+err_init:
+ sdw_delete_bus_master(&sdw->cdns.bus);
+err_master_reg:
+ return ret;
+}
+
+static int intel_remove(struct platform_device *pdev)
+{
+ struct sdw_intel *sdw;
+
+ sdw = platform_get_drvdata(pdev);
+
+ free_irq(sdw->res->irq, sdw);
+ sdw_delete_bus_master(&sdw->cdns.bus);
+
+ return 0;
+}
+
+static struct platform_driver sdw_intel_drv = {
+ .probe = intel_probe,
+ .remove = intel_remove,
+ .driver = {
+ .name = "int-sdw",
+
+ },
+};
+
+module_platform_driver(sdw_intel_drv);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_ALIAS("platform:int-sdw");
+MODULE_DESCRIPTION("Intel Soundwire Master Driver");
diff --git a/drivers/soundwire/intel.h b/drivers/soundwire/intel.h
new file mode 100644
index 000000000000..ffa30d9535a2
--- /dev/null
+++ b/drivers/soundwire/intel.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+/* Copyright(c) 2015-17 Intel Corporation. */
+
+#ifndef __SDW_INTEL_LOCAL_H
+#define __SDW_INTEL_LOCAL_H
+
+/**
+ * struct sdw_intel_link_res - SoundWire link resources
+ * @registers: Link IO registers base
+ * @shim: Audio shim pointer
+ * @alh: ALH (Audio Link Hub) pointer
+ * @irq: Interrupt line
+ *
+ * This is set as pdata for each link instance.
+ */
+struct sdw_intel_link_res {
+ void __iomem *registers;
+ void __iomem *shim;
+ void __iomem *alh;
+ int irq;
+};
+
+#endif /* __SDW_INTEL_LOCAL_H */
diff --git a/drivers/soundwire/intel_init.c b/drivers/soundwire/intel_init.c
new file mode 100644
index 000000000000..6f2bb99526f2
--- /dev/null
+++ b/drivers/soundwire/intel_init.c
@@ -0,0 +1,198 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+// Copyright(c) 2015-17 Intel Corporation.
+
+/*
+ * SDW Intel Init Routines
+ *
+ * Initializes and creates SDW devices based on ACPI and hardware values
+ */
+
+#include <linux/acpi.h>
+#include <linux/platform_device.h>
+#include <linux/soundwire/sdw_intel.h>
+#include "intel.h"
+
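+/* SHIM, ALH and per-link register blocks relative to the controller MMIO base */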
+#define SDW_MAX_LINKS 4
+#define SDW_SHIM_LCAP 0x0
+#define SDW_SHIM_BASE 0x2C000
+#define SDW_ALH_BASE 0x2C800
+#define SDW_LINK_BASE 0x30000
+#define SDW_LINK_SIZE 0x10000
+
+struct sdw_link_data {
+ struct sdw_intel_link_res res;
+ struct platform_device *pdev;
+};
+
+struct sdw_intel_ctx {
+ int count;
+ struct sdw_link_data *links;
+};
+
+static int sdw_intel_cleanup_pdev(struct sdw_intel_ctx *ctx)
+{
+ struct sdw_link_data *link = ctx->links;
+ int i;
+
+ if (!link)
+ return 0;
+
+ for (i = 0; i < ctx->count; i++) {
+ if (link->pdev)
+ platform_device_unregister(link->pdev);
+ link++;
+ }
+
+ kfree(ctx->links);
+ ctx->links = NULL;
+
+ return 0;
+}
+
+static struct sdw_intel_ctx
+*sdw_intel_add_controller(struct sdw_intel_res *res)
+{
+ struct platform_device_info pdevinfo;
+ struct platform_device *pdev;
+ struct sdw_link_data *link;
+ struct sdw_intel_ctx *ctx;
+ struct acpi_device *adev;
+ int ret, i;
+ u8 count;
+ u32 caps;
+
+ if (acpi_bus_get_device(res->handle, &adev))
+ return NULL;
+
+ /* Found controller, find links supported */
+ count = 0;
+ ret = fwnode_property_read_u8_array(acpi_fwnode_handle(adev),
+ "mipi-sdw-master-count", &count, 1);
+
+ /* Don't fail on error, continue and use hw value */
+ if (ret) {
+ dev_err(&adev->dev,
+ "Failed to read mipi-sdw-master-count: %d\n", ret);
+ count = SDW_MAX_LINKS;
+ }
+
+ /* Check SNDWLCAP.LCOUNT */
+ caps = ioread32(res->mmio_base + SDW_SHIM_BASE + SDW_SHIM_LCAP);
+
+ /* Check HW supported vs property value and use min of two */
+ count = min_t(u8, caps, count);
+
+ /* Check count is within bounds */
+ if (count > SDW_MAX_LINKS) {
+ dev_err(&adev->dev, "Link count %d exceeds max %d\n",
+ count, SDW_MAX_LINKS);
+ return NULL;
+ }
+
+ dev_dbg(&adev->dev, "Creating %d SDW Link devices\n", count);
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return NULL;
+
+ ctx->count = count;
+ ctx->links = kcalloc(ctx->count, sizeof(*ctx->links), GFP_KERNEL);
+ if (!ctx->links)
+ goto link_err;
+
+ link = ctx->links;
+
+ /* Create SDW Master devices */
+ for (i = 0; i < count; i++) {
+
+ link->res.irq = res->irq;
+ link->res.registers = res->mmio_base + SDW_LINK_BASE
+ + (SDW_LINK_SIZE * i);
+ link->res.shim = res->mmio_base + SDW_SHIM_BASE;
+ link->res.alh = res->mmio_base + SDW_ALH_BASE;
+
+ memset(&pdevinfo, 0, sizeof(pdevinfo));
+
+ pdevinfo.parent = res->parent;
+ pdevinfo.name = "int-sdw";
+ pdevinfo.id = i;
+ pdevinfo.fwnode = acpi_fwnode_handle(adev);
+ pdevinfo.data = &link->res;
+ pdevinfo.size_data = sizeof(link->res);
+
+ pdev = platform_device_register_full(&pdevinfo);
+ if (IS_ERR(pdev)) {
+ dev_err(&adev->dev,
+ "platform device creation failed: %ld\n",
+ PTR_ERR(pdev));
+ goto pdev_err;
+ }
+
+ link->pdev = pdev;
+ link++;
+ }
+
+ return ctx;
+
+pdev_err:
+ sdw_intel_cleanup_pdev(ctx);
+link_err:
+ kfree(ctx);
+ return NULL;
+}
+
+static acpi_status sdw_intel_acpi_cb(acpi_handle handle, u32 level,
+ void *cdata, void **return_value)
+{
+ struct sdw_intel_res *res = cdata;
+ struct acpi_device *adev;
+
+ if (acpi_bus_get_device(handle, &adev)) {
+ dev_err(&adev->dev, "Couldn't find ACPI handle\n");
+ return AE_NOT_FOUND;
+ }
+
+ res->handle = handle;
+ return AE_OK;
+}
+
+/**
+ * sdw_intel_init() - SoundWire Intel init routine
+ * @parent_handle: ACPI parent handle
+ * @res: resource data
+ *
+ * This scans the namespace and creates SoundWire link controller devices
+ * based on the info queried.
+ */
+void *sdw_intel_init(acpi_handle *parent_handle, struct sdw_intel_res *res)
+{
+ acpi_status status;
+
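+	/* Walk one level below the parent handle to find the SoundWire controller node */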
+ status = acpi_walk_namespace(ACPI_TYPE_DEVICE,
+ parent_handle, 1,
+ sdw_intel_acpi_cb,
+ NULL, res, NULL);
+ if (ACPI_FAILURE(status))
+ return NULL;
+
+ return sdw_intel_add_controller(res);
+}
+EXPORT_SYMBOL(sdw_intel_init);
+
+/**
+ * sdw_intel_exit() - SoundWire Intel exit
+ * @arg: callback context
+ *
+ * Delete the controller instances created and cleanup
+ */
+void sdw_intel_exit(void *arg)
+{
+ struct sdw_intel_ctx *ctx = arg;
+
+ sdw_intel_cleanup_pdev(ctx);
+ kfree(ctx);
+}
+EXPORT_SYMBOL(sdw_intel_exit);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("Intel Soundwire Init Library");
diff --git a/drivers/soundwire/mipi_disco.c b/drivers/soundwire/mipi_disco.c
new file mode 100644
index 000000000000..fdeba0c3b589
--- /dev/null
+++ b/drivers/soundwire/mipi_disco.c
@@ -0,0 +1,401 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+// Copyright(c) 2015-17 Intel Corporation.
+
+/*
+ * MIPI Discovery And Configuration (DisCo) Specification for SoundWire
+ * specifies properties to be implemented for SoundWire Masters and Slaves.
+ * The DisCo spec doesn't mandate these properties. However, the SDW bus
+ * cannot work without knowing these values.
+ *
+ * The helper functions read the Master and Slave properties. Implementers
+ * of Master or Slave drivers can use any of the following three mechanisms:
+ * a) Use these APIs as the .read_prop() callback for Master and Slave
+ * b) Implement their own methods, set those as .read_prop(), but invoke
+ * the APIs in this file for the generic read and override the values
+ * with platform-specific data
+ * c) Implement their own methods which do not use anything provided
+ * here
+ */
+
+#include <linux/device.h>
+#include <linux/property.h>
+#include <linux/mod_devicetable.h>
+#include <linux/soundwire/sdw.h>
+#include "bus.h"
+
+/**
+ * sdw_master_read_prop() - Read Master properties
+ * @bus: SDW bus instance
+ */
+int sdw_master_read_prop(struct sdw_bus *bus)
+{
+ struct sdw_master_prop *prop = &bus->prop;
+ struct fwnode_handle *link;
+ char name[32];
+ int nval, i;
+
+ device_property_read_u32(bus->dev,
+ "mipi-sdw-sw-interface-revision", &prop->revision);
+
+ /* Find master handle */
+ snprintf(name, sizeof(name),
+ "mipi-sdw-master-%d-subproperties", bus->link_id);
+
+ link = device_get_named_child_node(bus->dev, name);
+ if (!link) {
+ dev_err(bus->dev, "Master node %s not found\n", name);
+ return -EIO;
+ }
+
+	if (fwnode_property_read_bool(link,
+			"mipi-sdw-clock-stop-mode0-supported"))
+		prop->clk_stop_mode = SDW_CLK_STOP_MODE0;
+
+	if (fwnode_property_read_bool(link,
+			"mipi-sdw-clock-stop-mode1-supported"))
+		prop->clk_stop_mode |= SDW_CLK_STOP_MODE1;
+
+ fwnode_property_read_u32(link,
+ "mipi-sdw-max-clock-frequency", &prop->max_freq);
+
+ nval = fwnode_property_read_u32_array(link,
+ "mipi-sdw-clock-frequencies-supported", NULL, 0);
+ if (nval > 0) {
+
+ prop->num_freq = nval;
+ prop->freq = devm_kcalloc(bus->dev, prop->num_freq,
+ sizeof(*prop->freq), GFP_KERNEL);
+ if (!prop->freq)
+ return -ENOMEM;
+
+ fwnode_property_read_u32_array(link,
+ "mipi-sdw-clock-frequencies-supported",
+ prop->freq, prop->num_freq);
+ }
+
+ /*
+ * Check the frequencies supported. If FW doesn't provide max
+ * freq, then populate here by checking values.
+ */
+ if (!prop->max_freq && prop->freq) {
+ prop->max_freq = prop->freq[0];
+ for (i = 1; i < prop->num_freq; i++) {
+ if (prop->freq[i] > prop->max_freq)
+ prop->max_freq = prop->freq[i];
+ }
+ }
+
+ nval = fwnode_property_read_u32_array(link,
+ "mipi-sdw-supported-clock-gears", NULL, 0);
+ if (nval > 0) {
+
+ prop->num_clk_gears = nval;
+ prop->clk_gears = devm_kcalloc(bus->dev, prop->num_clk_gears,
+ sizeof(*prop->clk_gears), GFP_KERNEL);
+ if (!prop->clk_gears)
+ return -ENOMEM;
+
+ fwnode_property_read_u32_array(link,
+ "mipi-sdw-supported-clock-gears",
+ prop->clk_gears, prop->num_clk_gears);
+ }
+
+ fwnode_property_read_u32(link, "mipi-sdw-default-frame-rate",
+ &prop->default_frame_rate);
+
+ fwnode_property_read_u32(link, "mipi-sdw-default-frame-row-size",
+ &prop->default_row);
+
+ fwnode_property_read_u32(link, "mipi-sdw-default-frame-col-size",
+ &prop->default_col);
+
+ prop->dynamic_frame = fwnode_property_read_bool(link,
+ "mipi-sdw-dynamic-frame-shape");
+
+ fwnode_property_read_u32(link, "mipi-sdw-command-error-threshold",
+ &prop->err_threshold);
+
+ return 0;
+}
+EXPORT_SYMBOL(sdw_master_read_prop);
+
+static int sdw_slave_read_dp0(struct sdw_slave *slave,
+ struct fwnode_handle *port, struct sdw_dp0_prop *dp0)
+{
+ int nval;
+
+ fwnode_property_read_u32(port, "mipi-sdw-port-max-wordlength",
+ &dp0->max_word);
+
+ fwnode_property_read_u32(port, "mipi-sdw-port-min-wordlength",
+ &dp0->min_word);
+
+ nval = fwnode_property_read_u32_array(port,
+ "mipi-sdw-port-wordlength-configs", NULL, 0);
+ if (nval > 0) {
+
+ dp0->num_words = nval;
+ dp0->words = devm_kcalloc(&slave->dev,
+ dp0->num_words, sizeof(*dp0->words),
+ GFP_KERNEL);
+ if (!dp0->words)
+ return -ENOMEM;
+
+ fwnode_property_read_u32_array(port,
+ "mipi-sdw-port-wordlength-configs",
+ dp0->words, dp0->num_words);
+ }
+
+ dp0->flow_controlled = fwnode_property_read_bool(
+ port, "mipi-sdw-bra-flow-controlled");
+
+ dp0->simple_ch_prep_sm = fwnode_property_read_bool(
+ port, "mipi-sdw-simplified-channel-prepare-sm");
+
+ dp0->device_interrupts = fwnode_property_read_bool(
+ port, "mipi-sdw-imp-def-dp0-interrupts-supported");
+
+ return 0;
+}
+
+static int sdw_slave_read_dpn(struct sdw_slave *slave,
+ struct sdw_dpn_prop *dpn, int count, int ports, char *type)
+{
+ struct fwnode_handle *node;
+ u32 bit, i = 0;
+ int nval;
+ unsigned long addr;
+ char name[40];
+
+ addr = ports;
+ /* valid ports are 1 to 14 so apply mask */
+ addr &= GENMASK(14, 1);
+
+ for_each_set_bit(bit, &addr, 32) {
+ snprintf(name, sizeof(name),
+ "mipi-sdw-dp-%d-%s-subproperties", bit, type);
+
+ dpn[i].num = bit;
+
+ node = device_get_named_child_node(&slave->dev, name);
+ if (!node) {
+ dev_err(&slave->dev, "%s dpN not found\n", name);
+ return -EIO;
+ }
+
+ fwnode_property_read_u32(node, "mipi-sdw-port-max-wordlength",
+ &dpn[i].max_word);
+ fwnode_property_read_u32(node, "mipi-sdw-port-min-wordlength",
+ &dpn[i].min_word);
+
+ nval = fwnode_property_read_u32_array(node,
+ "mipi-sdw-port-wordlength-configs", NULL, 0);
+ if (nval > 0) {
+
+ dpn[i].num_words = nval;
+ dpn[i].words = devm_kcalloc(&slave->dev,
+ dpn[i].num_words,
+ sizeof(*dpn[i].words), GFP_KERNEL);
+ if (!dpn[i].words)
+ return -ENOMEM;
+
+ fwnode_property_read_u32_array(node,
+ "mipi-sdw-port-wordlength-configs",
+ dpn[i].words, dpn[i].num_words);
+ }
+
+ fwnode_property_read_u32(node, "mipi-sdw-data-port-type",
+ &dpn[i].type);
+
+ fwnode_property_read_u32(node,
+ "mipi-sdw-max-grouping-supported",
+ &dpn[i].max_grouping);
+
+ dpn[i].simple_ch_prep_sm = fwnode_property_read_bool(node,
+ "mipi-sdw-simplified-channelprepare-sm");
+
+ fwnode_property_read_u32(node,
+ "mipi-sdw-port-channelprepare-timeout",
+ &dpn[i].ch_prep_timeout);
+
+ fwnode_property_read_u32(node,
+ "mipi-sdw-imp-def-dpn-interrupts-supported",
+ &dpn[i].device_interrupts);
+
+ fwnode_property_read_u32(node, "mipi-sdw-min-channel-number",
+ &dpn[i].min_ch);
+
+ fwnode_property_read_u32(node, "mipi-sdw-max-channel-number",
+ &dpn[i].max_ch);
+
+ nval = fwnode_property_read_u32_array(node,
+ "mipi-sdw-channel-number-list", NULL, 0);
+ if (nval > 0) {
+
+ dpn[i].num_ch = nval;
+ dpn[i].ch = devm_kcalloc(&slave->dev, dpn[i].num_ch,
+ sizeof(*dpn[i].ch), GFP_KERNEL);
+ if (!dpn[i].ch)
+ return -ENOMEM;
+
+ fwnode_property_read_u32_array(node,
+ "mipi-sdw-channel-number-list",
+ dpn[i].ch, dpn[i].num_ch);
+ }
+
+ nval = fwnode_property_read_u32_array(node,
+ "mipi-sdw-channel-combination-list", NULL, 0);
+ if (nval > 0) {
+
+ dpn[i].num_ch_combinations = nval;
+ dpn[i].ch_combinations = devm_kcalloc(&slave->dev,
+ dpn[i].num_ch_combinations,
+ sizeof(*dpn[i].ch_combinations),
+ GFP_KERNEL);
+ if (!dpn[i].ch_combinations)
+ return -ENOMEM;
+
+ fwnode_property_read_u32_array(node,
+ "mipi-sdw-channel-combination-list",
+ dpn[i].ch_combinations,
+ dpn[i].num_ch_combinations);
+ }
+
+ fwnode_property_read_u32(node,
+ "mipi-sdw-modes-supported", &dpn[i].modes);
+
+ fwnode_property_read_u32(node, "mipi-sdw-max-async-buffer",
+ &dpn[i].max_async_buffer);
+
+ dpn[i].block_pack_mode = fwnode_property_read_bool(node,
+ "mipi-sdw-block-packing-mode");
+
+ fwnode_property_read_u32(node, "mipi-sdw-port-encoding-type",
+ &dpn[i].port_encoding);
+
+ /* TODO: Read audio mode */
+
+ i++;
+ }
+
+ return 0;
+}
+
+/**
+ * sdw_slave_read_prop() - Read Slave properties
+ * @slave: SDW Slave
+ */
+int sdw_slave_read_prop(struct sdw_slave *slave)
+{
+ struct sdw_slave_prop *prop = &slave->prop;
+ struct device *dev = &slave->dev;
+ struct fwnode_handle *port;
+ int num_of_ports, nval, i, dp0 = 0;
+
+ device_property_read_u32(dev, "mipi-sdw-sw-interface-revision",
+ &prop->mipi_revision);
+
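+	/* The DisCo property is "wake-up unavailable", so invert it to get wake_capable */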
+ prop->wake_capable = device_property_read_bool(dev,
+ "mipi-sdw-wake-up-unavailable");
+ prop->wake_capable = !prop->wake_capable;
+
+ prop->test_mode_capable = device_property_read_bool(dev,
+ "mipi-sdw-test-mode-supported");
+
+	prop->clk_stop_mode1 = device_property_read_bool(dev,
+			"mipi-sdw-clock-stop-mode1-supported");
+
+ prop->simple_clk_stop_capable = device_property_read_bool(dev,
+ "mipi-sdw-simplified-clockstopprepare-sm-supported");
+
+ device_property_read_u32(dev, "mipi-sdw-clockstopprepare-timeout",
+ &prop->clk_stop_timeout);
+
+ device_property_read_u32(dev, "mipi-sdw-slave-channelprepare-timeout",
+ &prop->ch_prep_timeout);
+
+ device_property_read_u32(dev,
+ "mipi-sdw-clockstopprepare-hard-reset-behavior",
+ &prop->reset_behave);
+
+ prop->high_PHY_capable = device_property_read_bool(dev,
+ "mipi-sdw-highPHY-capable");
+
+ prop->paging_support = device_property_read_bool(dev,
+ "mipi-sdw-paging-support");
+
+ prop->bank_delay_support = device_property_read_bool(dev,
+ "mipi-sdw-bank-delay-support");
+
+ device_property_read_u32(dev,
+ "mipi-sdw-port15-read-behavior", &prop->p15_behave);
+
+ device_property_read_u32(dev, "mipi-sdw-master-count",
+ &prop->master_count);
+
+ device_property_read_u32(dev, "mipi-sdw-source-port-list",
+ &prop->source_ports);
+
+ device_property_read_u32(dev, "mipi-sdw-sink-port-list",
+ &prop->sink_ports);
+
+ /* Read dp0 properties */
+ port = device_get_named_child_node(dev, "mipi-sdw-dp-0-subproperties");
+ if (!port) {
+ dev_dbg(dev, "DP0 node not found!!\n");
+ } else {
+
+ prop->dp0_prop = devm_kzalloc(&slave->dev,
+ sizeof(*prop->dp0_prop), GFP_KERNEL);
+ if (!prop->dp0_prop)
+ return -ENOMEM;
+
+ sdw_slave_read_dp0(slave, port, prop->dp0_prop);
+ dp0 = 1;
+ }
+
+ /*
+ * Based on each DPn port, get source and sink dpn properties.
+ * Also, some ports can operate as both source or sink.
+ */
+
+ /* Allocate memory for set bits in port lists */
+ nval = hweight32(prop->source_ports);
+ prop->src_dpn_prop = devm_kcalloc(&slave->dev, nval,
+ sizeof(*prop->src_dpn_prop), GFP_KERNEL);
+ if (!prop->src_dpn_prop)
+ return -ENOMEM;
+
+ /* Read dpn properties for source port(s) */
+ sdw_slave_read_dpn(slave, prop->src_dpn_prop, nval,
+ prop->source_ports, "source");
+
+ nval = hweight32(prop->sink_ports);
+ prop->sink_dpn_prop = devm_kcalloc(&slave->dev, nval,
+ sizeof(*prop->sink_dpn_prop), GFP_KERNEL);
+ if (!prop->sink_dpn_prop)
+ return -ENOMEM;
+
+ /* Read dpn properties for sink port(s) */
+ sdw_slave_read_dpn(slave, prop->sink_dpn_prop, nval,
+ prop->sink_ports, "sink");
+
+	/* Some ports are bidirectional, so derive the total port count by ORing the lists */
+ nval = prop->source_ports | prop->sink_ports;
+ num_of_ports = hweight32(nval) + dp0; /* add DP0 */
+
+ /* Allocate port_ready based on num_of_ports */
+ slave->port_ready = devm_kcalloc(&slave->dev, num_of_ports,
+ sizeof(*slave->port_ready), GFP_KERNEL);
+ if (!slave->port_ready)
+ return -ENOMEM;
+
+ /* Initialize completion */
+ for (i = 0; i < num_of_ports; i++)
+ init_completion(&slave->port_ready[i]);
+
+ return 0;
+}
+EXPORT_SYMBOL(sdw_slave_read_prop);
diff --git a/drivers/soundwire/slave.c b/drivers/soundwire/slave.c
new file mode 100644
index 000000000000..ac103bd0c176
--- /dev/null
+++ b/drivers/soundwire/slave.c
@@ -0,0 +1,114 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+// Copyright(c) 2015-17 Intel Corporation.
+
+#include <linux/acpi.h>
+#include <linux/soundwire/sdw.h>
+#include <linux/soundwire/sdw_type.h>
+#include "bus.h"
+
+static void sdw_slave_release(struct device *dev)
+{
+ struct sdw_slave *slave = dev_to_sdw_dev(dev);
+
+ kfree(slave);
+}
+
+static int sdw_slave_add(struct sdw_bus *bus,
+ struct sdw_slave_id *id, struct fwnode_handle *fwnode)
+{
+ struct sdw_slave *slave;
+ int ret;
+
+ slave = kzalloc(sizeof(*slave), GFP_KERNEL);
+ if (!slave)
+ return -ENOMEM;
+
+ /* Initialize data structure */
+ memcpy(&slave->id, id, sizeof(*id));
+ slave->dev.parent = bus->dev;
+ slave->dev.fwnode = fwnode;
+
+ /* name shall be sdw:link:mfg:part:class:unique */
+ dev_set_name(&slave->dev, "sdw:%x:%x:%x:%x:%x",
+ bus->link_id, id->mfg_id, id->part_id,
+ id->class_id, id->unique_id);
+
+ slave->dev.release = sdw_slave_release;
+ slave->dev.bus = &sdw_bus_type;
+ slave->bus = bus;
+ slave->status = SDW_SLAVE_UNATTACHED;
+ slave->dev_num = 0;
+
+ mutex_lock(&bus->bus_lock);
+ list_add_tail(&slave->node, &bus->slaves);
+ mutex_unlock(&bus->bus_lock);
+
+ ret = device_register(&slave->dev);
+ if (ret) {
+ dev_err(bus->dev, "Failed to add slave: ret %d\n", ret);
+
+ /*
+ * On err, don't free but drop ref as this will be freed
+ * when release method is invoked.
+ */
+ mutex_lock(&bus->bus_lock);
+ list_del(&slave->node);
+ mutex_unlock(&bus->bus_lock);
+ put_device(&slave->dev);
+ }
+
+ return ret;
+}
+
+#if IS_ENABLED(CONFIG_ACPI)
+/*
+ * sdw_acpi_find_slaves() - Find Slave devices in Master ACPI node
+ * @bus: SDW bus instance
+ *
+ * Scans the Master ACPI node for SDW child Slave devices and registers them.
+ */
+int sdw_acpi_find_slaves(struct sdw_bus *bus)
+{
+ struct acpi_device *adev, *parent;
+
+ parent = ACPI_COMPANION(bus->dev);
+ if (!parent) {
+ dev_err(bus->dev, "Can't find parent for acpi bind\n");
+ return -ENODEV;
+ }
+
+ list_for_each_entry(adev, &parent->children, node) {
+ unsigned long long addr;
+ struct sdw_slave_id id;
+ unsigned int link_id;
+ acpi_status status;
+
+ status = acpi_evaluate_integer(adev->handle,
+ METHOD_NAME__ADR, NULL, &addr);
+
+ if (ACPI_FAILURE(status)) {
+ dev_err(bus->dev, "_ADR resolution failed: %x\n",
+ status);
+			return -ENODEV;
+ }
+
+		/* Extract the link id from _ADR, bits 51 to 48 (inclusive) */
+ link_id = (addr >> 48) & GENMASK(3, 0);
+
+ /* Check for link_id match */
+ if (link_id != bus->link_id)
+ continue;
+
+ sdw_extract_slave_id(bus, addr, &id);
+
+ /*
+ * don't error check for sdw_slave_add as we want to continue
+ * adding Slaves
+ */
+ sdw_slave_add(bus, &id, acpi_fwnode_handle(adev));
+ }
+
+ return 0;
+}
+
+#endif
diff --git a/drivers/uio/uio_hv_generic.c b/drivers/uio/uio_hv_generic.c
index 48d5327d38d4..8ca549032c27 100644
--- a/drivers/uio/uio_hv_generic.c
+++ b/drivers/uio/uio_hv_generic.c
@@ -10,11 +10,13 @@
* Since the driver does not declare any device ids, you must allocate
* id and bind the device to the driver yourself. For example:
*
+ * Associate the network device class GUID with the UIO driver:
* # echo "f8615163-df3e-46c5-913f-f2d2f965ed0e" \
- * > /sys/bus/vmbus/drivers/uio_hv_generic
- * # echo -n vmbus-ed963694-e847-4b2a-85af-bc9cfc11d6f3 \
+ * > /sys/bus/vmbus/drivers/uio_hv_generic/new_id
+ * Then rebind the device:
+ * # echo -n "ed963694-e847-4b2a-85af-bc9cfc11d6f3" \
* > /sys/bus/vmbus/drivers/hv_netvsc/unbind
- * # echo -n vmbus-ed963694-e847-4b2a-85af-bc9cfc11d6f3 \
+ * # echo -n "ed963694-e847-4b2a-85af-bc9cfc11d6f3" \
* > /sys/bus/vmbus/drivers/uio_hv_generic/bind
*/
@@ -37,6 +39,10 @@
#define DRIVER_AUTHOR "Stephen Hemminger <sthemmin at microsoft.com>"
#define DRIVER_DESC "Generic UIO driver for VMBus devices"
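+/* Ring size in pages plus the send/receive data buffer sizes mapped to user space */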
+#define HV_RING_SIZE 512 /* pages */
+#define SEND_BUFFER_SIZE (15 * 1024 * 1024)
+#define RECV_BUFFER_SIZE (15 * 1024 * 1024)
+
/*
* List of resources to be mapped to user space
* can be extended up to MAX_UIO_MAPS(5) items
@@ -45,32 +51,22 @@ enum hv_uio_map {
TXRX_RING_MAP = 0,
INT_PAGE_MAP,
MON_PAGE_MAP,
+ RECV_BUF_MAP,
+ SEND_BUF_MAP
};
-#define HV_RING_SIZE 512
-
struct hv_uio_private_data {
struct uio_info info;
struct hv_device *device;
-};
-
-static int
-hv_uio_mmap(struct uio_info *info, struct vm_area_struct *vma)
-{
- int mi;
- if (vma->vm_pgoff >= MAX_UIO_MAPS)
- return -EINVAL;
+ void *recv_buf;
+ u32 recv_gpadl;
+	char recv_name[32];	/* "recv:4294967295" */
- if (info->mem[vma->vm_pgoff].size == 0)
- return -EINVAL;
-
- mi = (int)vma->vm_pgoff;
-
- return remap_pfn_range(vma, vma->vm_start,
- info->mem[mi].addr >> PAGE_SHIFT,
- vma->vm_end - vma->vm_start, vma->vm_page_prot);
-}
+ void *send_buf;
+ u32 send_gpadl;
+ char send_name[32];
+};
/*
* This is the irqcontrol callback to be registered to uio_info.
@@ -107,6 +103,36 @@ static void hv_uio_channel_cb(void *context)
uio_event_notify(&pdata->info);
}
+/*
+ * Callback from vmbus_event when channel is rescinded.
+ */
+static void hv_uio_rescind(struct vmbus_channel *channel)
+{
+ struct hv_device *hv_dev = channel->primary_channel->device_obj;
+ struct hv_uio_private_data *pdata = hv_get_drvdata(hv_dev);
+
+ /*
+	 * Turn off the interrupt file handle.
+	 * The next read for an event will return -EIO.
+ */
+ pdata->info.irq = 0;
+
+ /* Wake up reader */
+ uio_event_notify(&pdata->info);
+}
+
+static void
+hv_uio_cleanup(struct hv_device *dev, struct hv_uio_private_data *pdata)
+{
+ if (pdata->send_gpadl)
+ vmbus_teardown_gpadl(dev->channel, pdata->send_gpadl);
+ vfree(pdata->send_buf);
+
+ if (pdata->recv_gpadl)
+ vmbus_teardown_gpadl(dev->channel, pdata->recv_gpadl);
+ vfree(pdata->recv_buf);
+}
+
static int
hv_uio_probe(struct hv_device *dev,
const struct hv_vmbus_device_id *dev_id)
@@ -124,36 +150,82 @@ hv_uio_probe(struct hv_device *dev,
if (ret)
goto fail;
+	/* Communication with the host must use shared memory, not hypercalls */
+ if (!dev->channel->offermsg.monitor_allocated) {
+ dev_err(&dev->device, "vmbus channel requires hypercall\n");
+ ret = -ENOTSUPP;
+ goto fail_close;
+ }
+
dev->channel->inbound.ring_buffer->interrupt_mask = 1;
- set_channel_read_mode(dev->channel, HV_CALL_DIRECT);
+ set_channel_read_mode(dev->channel, HV_CALL_ISR);
/* Fill general uio info */
pdata->info.name = "uio_hv_generic";
pdata->info.version = DRIVER_VERSION;
pdata->info.irqcontrol = hv_uio_irqcontrol;
- pdata->info.mmap = hv_uio_mmap;
pdata->info.irq = UIO_IRQ_CUSTOM;
/* mem resources */
pdata->info.mem[TXRX_RING_MAP].name = "txrx_rings";
pdata->info.mem[TXRX_RING_MAP].addr
- = virt_to_phys(dev->channel->ringbuffer_pages);
+ = (uintptr_t)dev->channel->ringbuffer_pages;
pdata->info.mem[TXRX_RING_MAP].size
- = dev->channel->ringbuffer_pagecount * PAGE_SIZE;
+ = dev->channel->ringbuffer_pagecount << PAGE_SHIFT;
pdata->info.mem[TXRX_RING_MAP].memtype = UIO_MEM_LOGICAL;
pdata->info.mem[INT_PAGE_MAP].name = "int_page";
- pdata->info.mem[INT_PAGE_MAP].addr =
- virt_to_phys(vmbus_connection.int_page);
+ pdata->info.mem[INT_PAGE_MAP].addr
+ = (uintptr_t)vmbus_connection.int_page;
pdata->info.mem[INT_PAGE_MAP].size = PAGE_SIZE;
pdata->info.mem[INT_PAGE_MAP].memtype = UIO_MEM_LOGICAL;
- pdata->info.mem[MON_PAGE_MAP].name = "monitor_pages";
- pdata->info.mem[MON_PAGE_MAP].addr =
- virt_to_phys(vmbus_connection.monitor_pages[1]);
+ pdata->info.mem[MON_PAGE_MAP].name = "monitor_page";
+ pdata->info.mem[MON_PAGE_MAP].addr
+ = (uintptr_t)vmbus_connection.monitor_pages[1];
pdata->info.mem[MON_PAGE_MAP].size = PAGE_SIZE;
pdata->info.mem[MON_PAGE_MAP].memtype = UIO_MEM_LOGICAL;
+ pdata->recv_buf = vzalloc(RECV_BUFFER_SIZE);
+ if (pdata->recv_buf == NULL) {
+ ret = -ENOMEM;
+ goto fail_close;
+ }
+
+ ret = vmbus_establish_gpadl(dev->channel, pdata->recv_buf,
+ RECV_BUFFER_SIZE, &pdata->recv_gpadl);
+ if (ret)
+ goto fail_close;
+
+	/* Include the GPADL (Guest Physical Address Descriptor List) handle in the name */
+ snprintf(pdata->recv_name, sizeof(pdata->recv_name),
+ "recv:%u", pdata->recv_gpadl);
+ pdata->info.mem[RECV_BUF_MAP].name = pdata->recv_name;
+ pdata->info.mem[RECV_BUF_MAP].addr
+ = (uintptr_t)pdata->recv_buf;
+ pdata->info.mem[RECV_BUF_MAP].size = RECV_BUFFER_SIZE;
+ pdata->info.mem[RECV_BUF_MAP].memtype = UIO_MEM_VIRTUAL;
+
+ pdata->send_buf = vzalloc(SEND_BUFFER_SIZE);
+ if (pdata->send_buf == NULL) {
+ ret = -ENOMEM;
+ goto fail_close;
+ }
+
+ ret = vmbus_establish_gpadl(dev->channel, pdata->send_buf,
+ SEND_BUFFER_SIZE, &pdata->send_gpadl);
+ if (ret)
+ goto fail_close;
+
+ snprintf(pdata->send_name, sizeof(pdata->send_name),
+ "send:%u", pdata->send_gpadl);
+ pdata->info.mem[SEND_BUF_MAP].name = pdata->send_name;
+ pdata->info.mem[SEND_BUF_MAP].addr
+ = (uintptr_t)pdata->send_buf;
+ pdata->info.mem[SEND_BUF_MAP].size = SEND_BUFFER_SIZE;
+ pdata->info.mem[SEND_BUF_MAP].memtype = UIO_MEM_VIRTUAL;
+
pdata->info.priv = pdata;
pdata->device = dev;
@@ -163,11 +235,14 @@ hv_uio_probe(struct hv_device *dev,
goto fail_close;
}
+ vmbus_set_chn_rescind_callback(dev->channel, hv_uio_rescind);
+
hv_set_drvdata(dev, pdata);
return 0;
fail_close:
+ hv_uio_cleanup(dev, pdata);
vmbus_close(dev->channel);
fail:
kfree(pdata);
@@ -184,6 +259,7 @@ hv_uio_remove(struct hv_device *dev)
return 0;
uio_unregister_device(&pdata->info);
+ hv_uio_cleanup(dev, pdata);
hv_set_drvdata(dev, NULL);
vmbus_close(dev->channel);
kfree(pdata);
diff --git a/drivers/virt/Kconfig b/drivers/virt/Kconfig
index 99ebdde590f8..8d9cdfbd6bcc 100644
--- a/drivers/virt/Kconfig
+++ b/drivers/virt/Kconfig
@@ -30,4 +30,5 @@ config FSL_HV_MANAGER
4) A kernel interface for receiving callbacks when a managed
partition shuts down.
+source "drivers/virt/vboxguest/Kconfig"
endif
diff --git a/drivers/virt/Makefile b/drivers/virt/Makefile
index c47f04dd343b..d3f7b2540890 100644
--- a/drivers/virt/Makefile
+++ b/drivers/virt/Makefile
@@ -3,3 +3,4 @@
#
obj-$(CONFIG_FSL_HV_MANAGER) += fsl_hypervisor.o
+obj-y += vboxguest/
diff --git a/drivers/virt/vboxguest/Kconfig b/drivers/virt/vboxguest/Kconfig
new file mode 100644
index 000000000000..fffd318a10fe
--- /dev/null
+++ b/drivers/virt/vboxguest/Kconfig
@@ -0,0 +1,18 @@
+config VBOXGUEST
+ tristate "Virtual Box Guest integration support"
+ depends on X86 && PCI && INPUT
+ help
+	  This is a driver for the VirtualBox Guest PCI device used in
+	  VirtualBox virtual machines. Enabling this driver will add
+	  support for VirtualBox Guest integration features such as
+ copy-and-paste, seamless mode and OpenGL pass-through.
+
+ This driver also offers vboxguest IPC functionality which is needed
+ for the vboxfs driver which offers folder sharing support.
+
+ If you enable this driver you should also enable the VBOXVIDEO option.
+
+	  Although it is possible to build this driver into the kernel, it
+	  is advised to build it as a module so that it can be updated
+	  independently of the kernel. Select M to build this driver as a
+	  module.
diff --git a/drivers/virt/vboxguest/Makefile b/drivers/virt/vboxguest/Makefile
new file mode 100644
index 000000000000..203b8f465817
--- /dev/null
+++ b/drivers/virt/vboxguest/Makefile
@@ -0,0 +1,3 @@
+vboxguest-y := vboxguest_linux.o vboxguest_core.o vboxguest_utils.o
+
+obj-$(CONFIG_VBOXGUEST) += vboxguest.o
diff --git a/drivers/virt/vboxguest/vboxguest_core.c b/drivers/virt/vboxguest/vboxguest_core.c
new file mode 100644
index 000000000000..190dbf8cfcb5
--- /dev/null
+++ b/drivers/virt/vboxguest/vboxguest_core.c
@@ -0,0 +1,1571 @@
+// SPDX-License-Identifier: (GPL-2.0 OR CDDL-1.0)
+/*
+ * vboxguest core guest-device handling code, VBoxGuest.cpp in upstream svn.
+ *
+ * Copyright (C) 2007-2016 Oracle Corporation
+ */
+
+#include <linux/device.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/vbox_err.h>
+#include <linux/vbox_utils.h>
+#include <linux/vmalloc.h>
+#include "vboxguest_core.h"
+#include "vboxguest_version.h"
+
+/* Get the pointer to the first HGCM parameter. */
+#define VBG_IOCTL_HGCM_CALL_PARMS(a) \
+ ((struct vmmdev_hgcm_function_parameter *)( \
+ (u8 *)(a) + sizeof(struct vbg_ioctl_hgcm_call)))
+/* Get the pointer to the first HGCM parameter in a 32-bit request. */
+#define VBG_IOCTL_HGCM_CALL_PARMS32(a) \
+ ((struct vmmdev_hgcm_function_parameter32 *)( \
+ (u8 *)(a) + sizeof(struct vbg_ioctl_hgcm_call)))
+
+#define GUEST_MAPPINGS_TRIES 5
+
+/**
+ * Reserves memory in which the VMM can relocate any guest mappings
+ * that are floating around.
+ *
+ * This operation is a little bit tricky since the VMM might not accept
+ * just any address because of address clashes between the three contexts
+ * it operates in, so we try several times.
+ *
+ * Failure to reserve the guest mappings is ignored.
+ *
+ * @gdev: The Guest extension device.
+ */
+static void vbg_guest_mappings_init(struct vbg_dev *gdev)
+{
+ struct vmmdev_hypervisorinfo *req;
+ void *guest_mappings[GUEST_MAPPINGS_TRIES];
+ struct page **pages = NULL;
+ u32 size, hypervisor_size;
+ int i, rc;
+
+ /* Query the required space. */
+ req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HYPERVISOR_INFO);
+ if (!req)
+ return;
+
+ req->hypervisor_start = 0;
+ req->hypervisor_size = 0;
+ rc = vbg_req_perform(gdev, req);
+ if (rc < 0)
+ goto out;
+
+ /*
+ * The VMM will report back if there is nothing it wants to map, like
+ * for instance in VT-x and AMD-V mode.
+ */
+ if (req->hypervisor_size == 0)
+ goto out;
+
+ hypervisor_size = req->hypervisor_size;
+	/* Add 4 MiB so that we can align the vmap to a 4 MiB boundary, as the host requires. */
+ size = PAGE_ALIGN(req->hypervisor_size) + SZ_4M;
+
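+	/* Back the whole reservation with a single dummy page mapped repeatedly */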
+ pages = kmalloc(sizeof(*pages) * (size >> PAGE_SHIFT), GFP_KERNEL);
+ if (!pages)
+ goto out;
+
+ gdev->guest_mappings_dummy_page = alloc_page(GFP_HIGHUSER);
+ if (!gdev->guest_mappings_dummy_page)
+ goto out;
+
+ for (i = 0; i < (size >> PAGE_SHIFT); i++)
+ pages[i] = gdev->guest_mappings_dummy_page;
+
+ /*
+ * Try several times, the VMM might not accept some addresses because
+ * of address clashes between the three contexts.
+ */
+ for (i = 0; i < GUEST_MAPPINGS_TRIES; i++) {
+ guest_mappings[i] = vmap(pages, (size >> PAGE_SHIFT),
+ VM_MAP, PAGE_KERNEL_RO);
+ if (!guest_mappings[i])
+ break;
+
+ req->header.request_type = VMMDEVREQ_SET_HYPERVISOR_INFO;
+ req->header.rc = VERR_INTERNAL_ERROR;
+ req->hypervisor_size = hypervisor_size;
+ req->hypervisor_start =
+ (unsigned long)PTR_ALIGN(guest_mappings[i], SZ_4M);
+
+ rc = vbg_req_perform(gdev, req);
+ if (rc >= 0) {
+ gdev->guest_mappings = guest_mappings[i];
+ break;
+ }
+ }
+
+ /* Free vmap's from failed attempts. */
+ while (--i >= 0)
+ vunmap(guest_mappings[i]);
+
+ /* On failure free the dummy-page backing the vmap */
+ if (!gdev->guest_mappings) {
+ __free_page(gdev->guest_mappings_dummy_page);
+ gdev->guest_mappings_dummy_page = NULL;
+ }
+
+out:
+ kfree(req);
+ kfree(pages);
+}
+
+/**
+ * Undo what vbg_guest_mappings_init did.
+ *
+ * @gdev: The Guest extension device.
+ */
+static void vbg_guest_mappings_exit(struct vbg_dev *gdev)
+{
+ struct vmmdev_hypervisorinfo *req;
+ int rc;
+
+ if (!gdev->guest_mappings)
+ return;
+
+ /*
+ * Tell the host that we're going to free the memory we reserved for
+	 * it, then free it up. (Leak the memory if anything goes wrong here.)
+ */
+ req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_HYPERVISOR_INFO);
+ if (!req)
+ return;
+
+ req->hypervisor_start = 0;
+ req->hypervisor_size = 0;
+
+ rc = vbg_req_perform(gdev, req);
+
+ kfree(req);
+
+ if (rc < 0) {
+ vbg_err("%s error: %d\n", __func__, rc);
+ return;
+ }
+
+ vunmap(gdev->guest_mappings);
+ gdev->guest_mappings = NULL;
+
+ __free_page(gdev->guest_mappings_dummy_page);
+ gdev->guest_mappings_dummy_page = NULL;
+}
+
+/**
+ * Report the guest information to the host.
+ * Return: 0 or negative errno value.
+ * @gdev: The Guest extension device.
+ */
+static int vbg_report_guest_info(struct vbg_dev *gdev)
+{
+ /*
+ * Allocate and fill in the two guest info reports.
+ */
+ struct vmmdev_guest_info *req1 = NULL;
+ struct vmmdev_guest_info2 *req2 = NULL;
+ int rc, ret = -ENOMEM;
+
+ req1 = vbg_req_alloc(sizeof(*req1), VMMDEVREQ_REPORT_GUEST_INFO);
+ req2 = vbg_req_alloc(sizeof(*req2), VMMDEVREQ_REPORT_GUEST_INFO2);
+ if (!req1 || !req2)
+ goto out_free;
+
+ req1->interface_version = VMMDEV_VERSION;
+ req1->os_type = VMMDEV_OSTYPE_LINUX26;
+#if __BITS_PER_LONG == 64
+ req1->os_type |= VMMDEV_OSTYPE_X64;
+#endif
+
+ req2->additions_major = VBG_VERSION_MAJOR;
+ req2->additions_minor = VBG_VERSION_MINOR;
+ req2->additions_build = VBG_VERSION_BUILD;
+ req2->additions_revision = VBG_SVN_REV;
+ /* (no features defined yet) */
+ req2->additions_features = 0;
+ strlcpy(req2->name, VBG_VERSION_STRING,
+ sizeof(req2->name));
+
+ /*
+ * There are two protocols here:
+ * 1. INFO2 + INFO1. Supported by >=3.2.51.
+ * 2. INFO1 and optionally INFO2. The old protocol.
+ *
+ * We try protocol 2 first. It will fail with VERR_NOT_SUPPORTED
+ * if not supported by the VMMDev (message ordering requirement).
+ */
+ rc = vbg_req_perform(gdev, req2);
+ if (rc >= 0) {
+ rc = vbg_req_perform(gdev, req1);
+ } else if (rc == VERR_NOT_SUPPORTED || rc == VERR_NOT_IMPLEMENTED) {
+ rc = vbg_req_perform(gdev, req1);
+ if (rc >= 0) {
+ rc = vbg_req_perform(gdev, req2);
+ if (rc == VERR_NOT_IMPLEMENTED)
+ rc = VINF_SUCCESS;
+ }
+ }
+ ret = vbg_status_code_to_errno(rc);
+
+out_free:
+ kfree(req2);
+ kfree(req1);
+ return ret;
+}
+
+/**
+ * Report the guest driver status to the host.
+ * Return: 0 or negative errno value.
+ * @gdev: The Guest extension device.
+ * @active: Flag whether the driver is now active or not.
+ */
+static int vbg_report_driver_status(struct vbg_dev *gdev, bool active)
+{
+ struct vmmdev_guest_status *req;
+ int rc;
+
+ req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_REPORT_GUEST_STATUS);
+ if (!req)
+ return -ENOMEM;
+
+ req->facility = VBOXGUEST_FACILITY_TYPE_VBOXGUEST_DRIVER;
+ if (active)
+ req->status = VBOXGUEST_FACILITY_STATUS_ACTIVE;
+ else
+ req->status = VBOXGUEST_FACILITY_STATUS_INACTIVE;
+ req->flags = 0;
+
+ rc = vbg_req_perform(gdev, req);
+ if (rc == VERR_NOT_IMPLEMENTED) /* Compatibility with older hosts. */
+ rc = VINF_SUCCESS;
+
+ kfree(req);
+
+ return vbg_status_code_to_errno(rc);
+}
+
+/**
+ * Inflate the balloon by one chunk. The caller owns the balloon mutex.
+ * Return: 0 or negative errno value.
+ * @gdev: The Guest extension device.
+ * @chunk_idx: Index of the chunk.
+ */
+static int vbg_balloon_inflate(struct vbg_dev *gdev, u32 chunk_idx)
+{
+ struct vmmdev_memballoon_change *req = gdev->mem_balloon.change_req;
+ struct page **pages;
+ int i, rc, ret;
+
+ pages = kmalloc(sizeof(*pages) * VMMDEV_MEMORY_BALLOON_CHUNK_PAGES,
+ GFP_KERNEL | __GFP_NOWARN);
+ if (!pages)
+ return -ENOMEM;
+
+ req->header.size = sizeof(*req);
+ req->inflate = true;
+ req->pages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;
+
+ for (i = 0; i < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; i++) {
+ pages[i] = alloc_page(GFP_KERNEL | __GFP_NOWARN);
+ if (!pages[i]) {
+ ret = -ENOMEM;
+ goto out_error;
+ }
+
+ req->phys_page[i] = page_to_phys(pages[i]);
+ }
+
+ rc = vbg_req_perform(gdev, req);
+ if (rc < 0) {
+ vbg_err("%s error, rc: %d\n", __func__, rc);
+ ret = vbg_status_code_to_errno(rc);
+ goto out_error;
+ }
+
+ gdev->mem_balloon.pages[chunk_idx] = pages;
+
+ return 0;
+
+out_error:
+ while (--i >= 0)
+ __free_page(pages[i]);
+ kfree(pages);
+
+ return ret;
+}
+
+/**
+ * Deflate the balloon by one chunk. The caller owns the balloon mutex.
+ * Return: 0 or negative errno value.
+ * @gdev: The Guest extension device.
+ * @chunk_idx: Index of the chunk.
+ */
+static int vbg_balloon_deflate(struct vbg_dev *gdev, u32 chunk_idx)
+{
+ struct vmmdev_memballoon_change *req = gdev->mem_balloon.change_req;
+ struct page **pages = gdev->mem_balloon.pages[chunk_idx];
+ int i, rc;
+
+ req->header.size = sizeof(*req);
+ req->inflate = false;
+ req->pages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;
+
+ for (i = 0; i < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; i++)
+ req->phys_page[i] = page_to_phys(pages[i]);
+
+ rc = vbg_req_perform(gdev, req);
+ if (rc < 0) {
+ vbg_err("%s error, rc: %d\n", __func__, rc);
+ return vbg_status_code_to_errno(rc);
+ }
+
+ for (i = 0; i < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; i++)
+ __free_page(pages[i]);
+ kfree(pages);
+ gdev->mem_balloon.pages[chunk_idx] = NULL;
+
+ return 0;
+}
+
+/**
+ * Respond to VMMDEV_EVENT_BALLOON_CHANGE_REQUEST events, query the size
+ * the host wants the balloon to be and adjust accordingly.
+ */
+static void vbg_balloon_work(struct work_struct *work)
+{
+ struct vbg_dev *gdev =
+ container_of(work, struct vbg_dev, mem_balloon.work);
+ struct vmmdev_memballoon_info *req = gdev->mem_balloon.get_req;
+ u32 i, chunks;
+ int rc, ret;
+
+ /*
+ * Setting this bit means that we request the value from the host and
+ * change the guest memory balloon according to the returned value.
+ */
+ req->event_ack = VMMDEV_EVENT_BALLOON_CHANGE_REQUEST;
+ rc = vbg_req_perform(gdev, req);
+ if (rc < 0) {
+ vbg_err("%s error, rc: %d)\n", __func__, rc);
+ return;
+ }
+
+ /*
+ * The host always returns the same maximum amount of chunks, so
+ * we do this once.
+ */
+ if (!gdev->mem_balloon.max_chunks) {
+ gdev->mem_balloon.pages =
+ devm_kcalloc(gdev->dev, req->phys_mem_chunks,
+ sizeof(struct page **), GFP_KERNEL);
+ if (!gdev->mem_balloon.pages)
+ return;
+
+ gdev->mem_balloon.max_chunks = req->phys_mem_chunks;
+ }
+
+ chunks = req->balloon_chunks;
+ if (chunks > gdev->mem_balloon.max_chunks) {
+ vbg_err("%s: illegal balloon size %u (max=%u)\n",
+ __func__, chunks, gdev->mem_balloon.max_chunks);
+ return;
+ }
+
+ if (chunks > gdev->mem_balloon.chunks) {
+ /* inflate */
+ for (i = gdev->mem_balloon.chunks; i < chunks; i++) {
+ ret = vbg_balloon_inflate(gdev, i);
+ if (ret < 0)
+ return;
+
+ gdev->mem_balloon.chunks++;
+ }
+ } else {
+ /* deflate */
+ for (i = gdev->mem_balloon.chunks; i-- > chunks;) {
+ ret = vbg_balloon_deflate(gdev, i);
+ if (ret < 0)
+ return;
+
+ gdev->mem_balloon.chunks--;
+ }
+ }
+}
+
+/**
+ * Callback for heartbeat timer.
+ */
+static void vbg_heartbeat_timer(struct timer_list *t)
+{
+ struct vbg_dev *gdev = from_timer(gdev, t, heartbeat_timer);
+
+ vbg_req_perform(gdev, gdev->guest_heartbeat_req);
+ mod_timer(&gdev->heartbeat_timer,
+ msecs_to_jiffies(gdev->heartbeat_interval_ms));
+}
+
+/**
+ * Configure the host to check guest's heartbeat
+ * and get heartbeat interval from the host.
+ * Return: 0 or negative errno value.
+ * @gdev: The Guest extension device.
+ * @enabled: Set true to enable guest heartbeat checks on host.
+ */
+static int vbg_heartbeat_host_config(struct vbg_dev *gdev, bool enabled)
+{
+ struct vmmdev_heartbeat *req;
+ int rc;
+
+ req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_HEARTBEAT_CONFIGURE);
+ if (!req)
+ return -ENOMEM;
+
+ req->enabled = enabled;
+ req->interval_ns = 0;
+ rc = vbg_req_perform(gdev, req);
+ do_div(req->interval_ns, 1000000); /* ns -> ms */
+ gdev->heartbeat_interval_ms = req->interval_ns;
+ kfree(req);
+
+ return vbg_status_code_to_errno(rc);
+}
+
+/**
+ * Initializes the heartbeat timer. This feature may be disabled by the host.
+ * Return: 0 or negative errno value.
+ * @gdev: The Guest extension device.
+ */
+static int vbg_heartbeat_init(struct vbg_dev *gdev)
+{
+ int ret;
+
+ /* Make sure that heartbeat checking is disabled if we fail. */
+ ret = vbg_heartbeat_host_config(gdev, false);
+ if (ret < 0)
+ return ret;
+
+ ret = vbg_heartbeat_host_config(gdev, true);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Preallocate the request to use it from the timer callback because:
+ * 1) on Windows vbg_req_alloc must be called at IRQL <= APC_LEVEL
+ * and the timer callback runs at DISPATCH_LEVEL;
+ * 2) avoid repeated allocations.
+ */
+ gdev->guest_heartbeat_req = vbg_req_alloc(
+ sizeof(*gdev->guest_heartbeat_req),
+ VMMDEVREQ_GUEST_HEARTBEAT);
+ if (!gdev->guest_heartbeat_req)
+ return -ENOMEM;
+
+ vbg_info("%s: Setting up heartbeat to trigger every %d milliseconds\n",
+ __func__, gdev->heartbeat_interval_ms);
+ mod_timer(&gdev->heartbeat_timer, 0);
+
+ return 0;
+}
+
+/**
+ * Clean up heartbeat code: stop the heartbeat timer and disable host heartbeat checking.
+ * @gdev: The Guest extension device.
+ */
+static void vbg_heartbeat_exit(struct vbg_dev *gdev)
+{
+ del_timer_sync(&gdev->heartbeat_timer);
+ vbg_heartbeat_host_config(gdev, false);
+	kfree(gdev->guest_heartbeat_req);
+}
+
+/**
+ * Applies a change to the bit usage tracker.
+ * Return: true if the mask changed, false if not.
+ * @tracker: The bit usage tracker.
+ * @changed: The bits to change.
+ * @previous: The previous value of the bits.
+ */
+static bool vbg_track_bit_usage(struct vbg_bit_usage_tracker *tracker,
+ u32 changed, u32 previous)
+{
+ bool global_change = false;
+
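+	/* Process the changed bits one at a time, lowest bit first */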
+ while (changed) {
+ u32 bit = ffs(changed) - 1;
+ u32 bitmask = BIT(bit);
+
+ if (bitmask & previous) {
+ tracker->per_bit_usage[bit] -= 1;
+ if (tracker->per_bit_usage[bit] == 0) {
+ global_change = true;
+ tracker->mask &= ~bitmask;
+ }
+ } else {
+ tracker->per_bit_usage[bit] += 1;
+ if (tracker->per_bit_usage[bit] == 1) {
+ global_change = true;
+ tracker->mask |= bitmask;
+ }
+ }
+
+ changed &= ~bitmask;
+ }
+
+ return global_change;
+}
+
+/**
+ * Init and termination worker for resetting the event filter on the host.
+ * Return: 0 or negative errno value.
+ * @gdev: The Guest extension device.
+ * @fixed_events: Fixed events (init time).
+ */
+static int vbg_reset_host_event_filter(struct vbg_dev *gdev,
+ u32 fixed_events)
+{
+ struct vmmdev_mask *req;
+ int rc;
+
+ req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_CTL_GUEST_FILTER_MASK);
+ if (!req)
+ return -ENOMEM;
+
+ req->not_mask = U32_MAX & ~fixed_events;
+ req->or_mask = fixed_events;
+ rc = vbg_req_perform(gdev, req);
+ if (rc < 0)
+ vbg_err("%s error, rc: %d\n", __func__, rc);
+
+ kfree(req);
+ return vbg_status_code_to_errno(rc);
+}
+
+/**
+ * Changes the event filter mask for the given session.
+ *
+ * This is called in response to VBG_IOCTL_CHANGE_FILTER_MASK as well as to
+ * do session cleanup. Takes the session spinlock.
+ *
+ * Return: 0 or negative errno value.
+ * @gdev: The Guest extension device.
+ * @session: The session.
+ * @or_mask: The events to add.
+ * @not_mask: The events to remove.
+ * @session_termination: Set if we're called by the session cleanup code.
+ * This tweaks the error handling so we perform
+ * proper session cleanup even if the host
+ * misbehaves.
+ */
+static int vbg_set_session_event_filter(struct vbg_dev *gdev,
+ struct vbg_session *session,
+ u32 or_mask, u32 not_mask,
+ bool session_termination)
+{
+ struct vmmdev_mask *req;
+ u32 changed, previous;
+ int rc, ret = 0;
+
+	/* Allocate a request buffer before taking the mutex */
+ req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_CTL_GUEST_FILTER_MASK);
+ if (!req) {
+ if (!session_termination)
+ return -ENOMEM;
+ /* Ignore allocation failure, we must do session cleanup. */
+ }
+
+ mutex_lock(&gdev->session_mutex);
+
+ /* Apply the changes to the session mask. */
+ previous = session->event_filter;
+ session->event_filter |= or_mask;
+ session->event_filter &= ~not_mask;
+
+ /* If anything actually changed, update the global usage counters. */
+ changed = previous ^ session->event_filter;
+ if (!changed)
+ goto out;
+
+ vbg_track_bit_usage(&gdev->event_filter_tracker, changed, previous);
+ or_mask = gdev->fixed_events | gdev->event_filter_tracker.mask;
+
+ if (gdev->event_filter_host == or_mask || !req)
+ goto out;
+
+ gdev->event_filter_host = or_mask;
+ req->or_mask = or_mask;
+ req->not_mask = ~or_mask;
+ rc = vbg_req_perform(gdev, req);
+ if (rc < 0) {
+ ret = vbg_status_code_to_errno(rc);
+
+ /* Failed, roll back (unless it's session termination time). */
+ gdev->event_filter_host = U32_MAX;
+ if (session_termination)
+ goto out;
+
+ vbg_track_bit_usage(&gdev->event_filter_tracker, changed,
+ session->event_filter);
+ session->event_filter = previous;
+ }
+
+out:
+ mutex_unlock(&gdev->session_mutex);
+ kfree(req);
+
+ return ret;
+}
+
+/**
+ * Init and termination worker for setting the guest capabilities to zero on the host.
+ * Return: 0 or negative errno value.
+ * @gdev: The Guest extension device.
+ */
+static int vbg_reset_host_capabilities(struct vbg_dev *gdev)
+{
+ struct vmmdev_mask *req;
+ int rc;
+
+ req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES);
+ if (!req)
+ return -ENOMEM;
+
+ req->not_mask = U32_MAX;
+ req->or_mask = 0;
+ rc = vbg_req_perform(gdev, req);
+ if (rc < 0)
+ vbg_err("%s error, rc: %d\n", __func__, rc);
+
+ kfree(req);
+ return vbg_status_code_to_errno(rc);
+}
+
+/**
+ * Sets the guest capabilities for a session. Takes the session mutex.
+ * Return: 0 or negative errno value.
+ * @gdev: The Guest extension device.
+ * @session: The session.
+ * @or_mask: The capabilities to add.
+ * @not_mask: The capabilities to remove.
+ * @session_termination: Set if we're called by the session cleanup code.
+ * This tweaks the error handling so we perform
+ * proper session cleanup even if the host
+ * misbehaves.
+ */
+static int vbg_set_session_capabilities(struct vbg_dev *gdev,
+ struct vbg_session *session,
+ u32 or_mask, u32 not_mask,
+ bool session_termination)
+{
+ struct vmmdev_mask *req;
+ u32 changed, previous;
+ int rc, ret = 0;
+
+	/* Allocate a request buffer before taking the mutex */
+ req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES);
+ if (!req) {
+ if (!session_termination)
+ return -ENOMEM;
+ /* Ignore allocation failure, we must do session cleanup. */
+ }
+
+ mutex_lock(&gdev->session_mutex);
+
+ /* Apply the changes to the session mask. */
+ previous = session->guest_caps;
+ session->guest_caps |= or_mask;
+ session->guest_caps &= ~not_mask;
+
+ /* If anything actually changed, update the global usage counters. */
+ changed = previous ^ session->guest_caps;
+ if (!changed)
+ goto out;
+
+ vbg_track_bit_usage(&gdev->guest_caps_tracker, changed, previous);
+ or_mask = gdev->guest_caps_tracker.mask;
+
+ if (gdev->guest_caps_host == or_mask || !req)
+ goto out;
+
+ gdev->guest_caps_host = or_mask;
+ req->or_mask = or_mask;
+ req->not_mask = ~or_mask;
+ rc = vbg_req_perform(gdev, req);
+ if (rc < 0) {
+ ret = vbg_status_code_to_errno(rc);
+
+ /* Failed, roll back (unless it's session termination time). */
+ gdev->guest_caps_host = U32_MAX;
+ if (session_termination)
+ goto out;
+
+ vbg_track_bit_usage(&gdev->guest_caps_tracker, changed,
+ session->guest_caps);
+ session->guest_caps = previous;
+ }
+
+out:
+ mutex_unlock(&gdev->session_mutex);
+ kfree(req);
+
+ return ret;
+}
+
+/**
+ * vbg_query_host_version gets the host feature mask and version information.
+ * Return: 0 or negative errno value.
+ * @gdev: The Guest extension device.
+ */
+static int vbg_query_host_version(struct vbg_dev *gdev)
+{
+ struct vmmdev_host_version *req;
+ int rc, ret;
+
+ req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HOST_VERSION);
+ if (!req)
+ return -ENOMEM;
+
+ rc = vbg_req_perform(gdev, req);
+ ret = vbg_status_code_to_errno(rc);
+ if (ret)
+ goto out;
+
+ snprintf(gdev->host_version, sizeof(gdev->host_version), "%u.%u.%ur%u",
+ req->major, req->minor, req->build, req->revision);
+ gdev->host_features = req->features;
+
+ vbg_info("vboxguest: host-version: %s %#x\n", gdev->host_version,
+ gdev->host_features);
+
+ if (!(req->features & VMMDEV_HVF_HGCM_PHYS_PAGE_LIST)) {
+ vbg_err("vboxguest: Error host too old (does not support page-lists)\n");
+ ret = -ENODEV;
+ }
+
+out:
+ kfree(req);
+ return ret;
+}
+
+/**
+ * Initializes the VBoxGuest device extension when the
+ * device driver is loaded.
+ *
+ * The native code locates the VMMDev on the PCI bus and retrieves
+ * the MMIO and I/O port ranges; this function takes care of
+ * mapping the MMIO memory (if present). Upon successful return
+ * the native code should set up the interrupt handler.
+ *
+ * Return: 0 or negative errno value.
+ *
+ * @gdev: The Guest extension device.
+ * @fixed_events: Events that will be enabled upon init and no client
+ * will ever be allowed to mask.
+ */
+int vbg_core_init(struct vbg_dev *gdev, u32 fixed_events)
+{
+ int ret = -ENOMEM;
+
+ gdev->fixed_events = fixed_events | VMMDEV_EVENT_HGCM;
+ gdev->event_filter_host = U32_MAX; /* forces a report */
+ gdev->guest_caps_host = U32_MAX; /* forces a report */
+
+ init_waitqueue_head(&gdev->event_wq);
+ init_waitqueue_head(&gdev->hgcm_wq);
+ spin_lock_init(&gdev->event_spinlock);
+ mutex_init(&gdev->session_mutex);
+ mutex_init(&gdev->cancel_req_mutex);
+ timer_setup(&gdev->heartbeat_timer, vbg_heartbeat_timer, 0);
+ INIT_WORK(&gdev->mem_balloon.work, vbg_balloon_work);
+
+ gdev->mem_balloon.get_req =
+ vbg_req_alloc(sizeof(*gdev->mem_balloon.get_req),
+ VMMDEVREQ_GET_MEMBALLOON_CHANGE_REQ);
+ gdev->mem_balloon.change_req =
+ vbg_req_alloc(sizeof(*gdev->mem_balloon.change_req),
+ VMMDEVREQ_CHANGE_MEMBALLOON);
+ gdev->cancel_req =
+ vbg_req_alloc(sizeof(*(gdev->cancel_req)),
+ VMMDEVREQ_HGCM_CANCEL2);
+ gdev->ack_events_req =
+ vbg_req_alloc(sizeof(*gdev->ack_events_req),
+ VMMDEVREQ_ACKNOWLEDGE_EVENTS);
+ gdev->mouse_status_req =
+ vbg_req_alloc(sizeof(*gdev->mouse_status_req),
+ VMMDEVREQ_GET_MOUSE_STATUS);
+
+ if (!gdev->mem_balloon.get_req || !gdev->mem_balloon.change_req ||
+ !gdev->cancel_req || !gdev->ack_events_req ||
+ !gdev->mouse_status_req)
+ goto err_free_reqs;
+
+ ret = vbg_query_host_version(gdev);
+ if (ret)
+ goto err_free_reqs;
+
+ ret = vbg_report_guest_info(gdev);
+ if (ret) {
+ vbg_err("vboxguest: vbg_report_guest_info error: %d\n", ret);
+ goto err_free_reqs;
+ }
+
+ ret = vbg_reset_host_event_filter(gdev, gdev->fixed_events);
+ if (ret) {
+ vbg_err("vboxguest: Error setting fixed event filter: %d\n",
+ ret);
+ goto err_free_reqs;
+ }
+
+ ret = vbg_reset_host_capabilities(gdev);
+ if (ret) {
+ vbg_err("vboxguest: Error clearing guest capabilities: %d\n",
+ ret);
+ goto err_free_reqs;
+ }
+
+ ret = vbg_core_set_mouse_status(gdev, 0);
+ if (ret) {
+ vbg_err("vboxguest: Error clearing mouse status: %d\n", ret);
+ goto err_free_reqs;
+ }
+
+ /* These may fail without requiring the driver init to fail. */
+ vbg_guest_mappings_init(gdev);
+ vbg_heartbeat_init(gdev);
+
+ /* All Done! */
+ ret = vbg_report_driver_status(gdev, true);
+ if (ret < 0)
+ vbg_err("vboxguest: Error reporting driver status: %d\n", ret);
+
+ return 0;
+
+err_free_reqs:
+ kfree(gdev->mouse_status_req);
+ kfree(gdev->ack_events_req);
+ kfree(gdev->cancel_req);
+ kfree(gdev->mem_balloon.change_req);
+ kfree(gdev->mem_balloon.get_req);
+ return ret;
+}
+
+/**
+ * Call this on exit to clean up vboxguest-core managed resources.
+ *
+ * The native code should call this before the driver is unloaded,
+ * but not on shutdown.
+ * @gdev: The Guest extension device.
+ */
+void vbg_core_exit(struct vbg_dev *gdev)
+{
+ vbg_heartbeat_exit(gdev);
+ vbg_guest_mappings_exit(gdev);
+
+ /* Clear the host flags (mouse status etc). */
+ vbg_reset_host_event_filter(gdev, 0);
+ vbg_reset_host_capabilities(gdev);
+ vbg_core_set_mouse_status(gdev, 0);
+
+ kfree(gdev->mouse_status_req);
+ kfree(gdev->ack_events_req);
+ kfree(gdev->cancel_req);
+ kfree(gdev->mem_balloon.change_req);
+ kfree(gdev->mem_balloon.get_req);
+}
+
+/**
+ * Creates a VBoxGuest user session.
+ *
+ * vboxguest_linux.c calls this when userspace opens the char-device.
+ * Return: A pointer to the new session or an ERR_PTR on error.
+ * @gdev: The Guest extension device.
+ * @user: Set if this is a session for the vboxuser device.
+ */
+struct vbg_session *vbg_core_open_session(struct vbg_dev *gdev, bool user)
+{
+ struct vbg_session *session;
+
+ session = kzalloc(sizeof(*session), GFP_KERNEL);
+ if (!session)
+ return ERR_PTR(-ENOMEM);
+
+ session->gdev = gdev;
+ session->user_session = user;
+
+ return session;
+}
+
+/**
+ * Closes a VBoxGuest session.
+ * @session: The session to close (and free).
+ */
+void vbg_core_close_session(struct vbg_session *session)
+{
+ struct vbg_dev *gdev = session->gdev;
+ int i, rc;
+
+ vbg_set_session_capabilities(gdev, session, 0, U32_MAX, true);
+ vbg_set_session_event_filter(gdev, session, 0, U32_MAX, true);
+
+ for (i = 0; i < ARRAY_SIZE(session->hgcm_client_ids); i++) {
+ if (!session->hgcm_client_ids[i])
+ continue;
+
+ vbg_hgcm_disconnect(gdev, session->hgcm_client_ids[i], &rc);
+ }
+
+ kfree(session);
+}
+
+static int vbg_ioctl_chk(struct vbg_ioctl_hdr *hdr, size_t in_size,
+ size_t out_size)
+{
+ if (hdr->size_in != (sizeof(*hdr) + in_size) ||
+ hdr->size_out != (sizeof(*hdr) + out_size))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int vbg_ioctl_driver_version_info(
+ struct vbg_ioctl_driver_version_info *info)
+{
+ const u16 vbg_maj_version = VBG_IOC_VERSION >> 16;
+ u16 min_maj_version, req_maj_version;
+
+ if (vbg_ioctl_chk(&info->hdr, sizeof(info->u.in), sizeof(info->u.out)))
+ return -EINVAL;
+
+ req_maj_version = info->u.in.req_version >> 16;
+ min_maj_version = info->u.in.min_version >> 16;
+
+ if (info->u.in.min_version > info->u.in.req_version ||
+ min_maj_version != req_maj_version)
+ return -EINVAL;
+
+ if (info->u.in.min_version <= VBG_IOC_VERSION &&
+ min_maj_version == vbg_maj_version) {
+ info->u.out.session_version = VBG_IOC_VERSION;
+ } else {
+ info->u.out.session_version = U32_MAX;
+ info->hdr.rc = VERR_VERSION_MISMATCH;
+ }
+
+ info->u.out.driver_version = VBG_IOC_VERSION;
+ info->u.out.driver_revision = 0;
+ info->u.out.reserved1 = 0;
+ info->u.out.reserved2 = 0;
+
+ return 0;
+}
+
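+/*
+ * Wake-up condition for the wait-for-events ioctl. The event_spinlock is
+ * taken because pending_events is also updated from the ISR.
+ */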
+static bool vbg_wait_event_cond(struct vbg_dev *gdev,
+ struct vbg_session *session,
+ u32 event_mask)
+{
+ unsigned long flags;
+ bool wakeup;
+ u32 events;
+
+ spin_lock_irqsave(&gdev->event_spinlock, flags);
+
+ events = gdev->pending_events & event_mask;
+ wakeup = events || session->cancel_waiters;
+
+ spin_unlock_irqrestore(&gdev->event_spinlock, flags);
+
+ return wakeup;
+}
+
+/* Must be called with the event_spinlock held */
+static u32 vbg_consume_events_locked(struct vbg_dev *gdev,
+ struct vbg_session *session,
+ u32 event_mask)
+{
+ u32 events = gdev->pending_events & event_mask;
+
+ gdev->pending_events &= ~events;
+ return events;
+}
+
+static int vbg_ioctl_wait_for_events(struct vbg_dev *gdev,
+ struct vbg_session *session,
+ struct vbg_ioctl_wait_for_events *wait)
+{
+ u32 timeout_ms = wait->u.in.timeout_ms;
+ u32 event_mask = wait->u.in.events;
+ unsigned long flags;
+ long timeout;
+ int ret = 0;
+
+ if (vbg_ioctl_chk(&wait->hdr, sizeof(wait->u.in), sizeof(wait->u.out)))
+ return -EINVAL;
+
+ if (timeout_ms == U32_MAX)
+ timeout = MAX_SCHEDULE_TIMEOUT;
+ else
+ timeout = msecs_to_jiffies(timeout_ms);
+
+ wait->u.out.events = 0;
+ do {
+ timeout = wait_event_interruptible_timeout(
+ gdev->event_wq,
+ vbg_wait_event_cond(gdev, session, event_mask),
+ timeout);
+
+ spin_lock_irqsave(&gdev->event_spinlock, flags);
+
+ if (timeout < 0 || session->cancel_waiters) {
+ ret = -EINTR;
+ } else if (timeout == 0) {
+ ret = -ETIMEDOUT;
+ } else {
+ wait->u.out.events =
+ vbg_consume_events_locked(gdev, session, event_mask);
+ }
+
+ spin_unlock_irqrestore(&gdev->event_spinlock, flags);
+
+ /*
+ * Someone else may have consumed the event(s) first, in
+ * which case we go back to waiting.
+ */
+ } while (ret == 0 && wait->u.out.events == 0);
+
+ return ret;
+}
+
+static int vbg_ioctl_interrupt_all_wait_events(struct vbg_dev *gdev,
+ struct vbg_session *session,
+ struct vbg_ioctl_hdr *hdr)
+{
+ unsigned long flags;
+
+ if (hdr->size_in != sizeof(*hdr) || hdr->size_out != sizeof(*hdr))
+ return -EINVAL;
+
+ spin_lock_irqsave(&gdev->event_spinlock, flags);
+ session->cancel_waiters = true;
+ spin_unlock_irqrestore(&gdev->event_spinlock, flags);
+
+ wake_up(&gdev->event_wq);
+
+ return 0;
+}
+
+/**
+ * Checks if the VMM request is allowed in the context of the given session.
+ * Return: 0 or negative errno value.
+ * @gdev: The Guest extension device.
+ * @session: The calling session.
+ * @req: The request.
+ */
+static int vbg_req_allowed(struct vbg_dev *gdev, struct vbg_session *session,
+ const struct vmmdev_request_header *req)
+{
+ const struct vmmdev_guest_status *guest_status;
+ bool trusted_apps_only;
+
+ switch (req->request_type) {
+ /* Trusted users apps only. */
+ case VMMDEVREQ_QUERY_CREDENTIALS:
+ case VMMDEVREQ_REPORT_CREDENTIALS_JUDGEMENT:
+ case VMMDEVREQ_REGISTER_SHARED_MODULE:
+ case VMMDEVREQ_UNREGISTER_SHARED_MODULE:
+ case VMMDEVREQ_WRITE_COREDUMP:
+ case VMMDEVREQ_GET_CPU_HOTPLUG_REQ:
+ case VMMDEVREQ_SET_CPU_HOTPLUG_STATUS:
+ case VMMDEVREQ_CHECK_SHARED_MODULES:
+ case VMMDEVREQ_GET_PAGE_SHARING_STATUS:
+ case VMMDEVREQ_DEBUG_IS_PAGE_SHARED:
+ case VMMDEVREQ_REPORT_GUEST_STATS:
+ case VMMDEVREQ_REPORT_GUEST_USER_STATE:
+ case VMMDEVREQ_GET_STATISTICS_CHANGE_REQ:
+ trusted_apps_only = true;
+ break;
+
+ /* Anyone. */
+ case VMMDEVREQ_GET_MOUSE_STATUS:
+ case VMMDEVREQ_SET_MOUSE_STATUS:
+ case VMMDEVREQ_SET_POINTER_SHAPE:
+ case VMMDEVREQ_GET_HOST_VERSION:
+ case VMMDEVREQ_IDLE:
+ case VMMDEVREQ_GET_HOST_TIME:
+ case VMMDEVREQ_SET_POWER_STATUS:
+ case VMMDEVREQ_ACKNOWLEDGE_EVENTS:
+ case VMMDEVREQ_CTL_GUEST_FILTER_MASK:
+ case VMMDEVREQ_REPORT_GUEST_STATUS:
+ case VMMDEVREQ_GET_DISPLAY_CHANGE_REQ:
+ case VMMDEVREQ_VIDEMODE_SUPPORTED:
+ case VMMDEVREQ_GET_HEIGHT_REDUCTION:
+ case VMMDEVREQ_GET_DISPLAY_CHANGE_REQ2:
+ case VMMDEVREQ_VIDEMODE_SUPPORTED2:
+ case VMMDEVREQ_VIDEO_ACCEL_ENABLE:
+ case VMMDEVREQ_VIDEO_ACCEL_FLUSH:
+ case VMMDEVREQ_VIDEO_SET_VISIBLE_REGION:
+ case VMMDEVREQ_GET_DISPLAY_CHANGE_REQEX:
+ case VMMDEVREQ_GET_SEAMLESS_CHANGE_REQ:
+ case VMMDEVREQ_GET_VRDPCHANGE_REQ:
+ case VMMDEVREQ_LOG_STRING:
+ case VMMDEVREQ_GET_SESSION_ID:
+ trusted_apps_only = false;
+ break;
+
+ /* Depends on the request parameters... */
+ case VMMDEVREQ_REPORT_GUEST_CAPABILITIES:
+ guest_status = (const struct vmmdev_guest_status *)req;
+ switch (guest_status->facility) {
+ case VBOXGUEST_FACILITY_TYPE_ALL:
+ case VBOXGUEST_FACILITY_TYPE_VBOXGUEST_DRIVER:
+ vbg_err("Denying userspace vmm report guest cap. call facility %#08x\n",
+ guest_status->facility);
+ return -EPERM;
+ case VBOXGUEST_FACILITY_TYPE_VBOX_SERVICE:
+ trusted_apps_only = true;
+ break;
+ case VBOXGUEST_FACILITY_TYPE_VBOX_TRAY_CLIENT:
+ case VBOXGUEST_FACILITY_TYPE_SEAMLESS:
+ case VBOXGUEST_FACILITY_TYPE_GRAPHICS:
+ default:
+ trusted_apps_only = false;
+ break;
+ }
+ break;
+
+ /* Anything else is not allowed. */
+ default:
+ vbg_err("Denying userspace vmm call type %#08x\n",
+ req->request_type);
+ return -EPERM;
+ }
+
+ if (trusted_apps_only && session->user_session) {
+ vbg_err("Denying userspace vmm call type %#08x through vboxuser device node\n",
+ req->request_type);
+ return -EPERM;
+ }
+
+ return 0;
+}
+
+static int vbg_ioctl_vmmrequest(struct vbg_dev *gdev,
+ struct vbg_session *session, void *data)
+{
+ struct vbg_ioctl_hdr *hdr = data;
+ int ret;
+
+ if (hdr->size_in != hdr->size_out)
+ return -EINVAL;
+
+ if (hdr->size_in > VMMDEV_MAX_VMMDEVREQ_SIZE)
+ return -E2BIG;
+
+ if (hdr->type == VBG_IOCTL_HDR_TYPE_DEFAULT)
+ return -EINVAL;
+
+ ret = vbg_req_allowed(gdev, session, data);
+ if (ret < 0)
+ return ret;
+
+ vbg_req_perform(gdev, data);
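+	/*
+	 * HGCM request types are rejected by vbg_req_allowed(), so the host
+	 * should never answer with VINF_HGCM_ASYNC_EXECUTE here.
+	 */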
+ WARN_ON(hdr->rc == VINF_HGCM_ASYNC_EXECUTE);
+
+ return 0;
+}
+
+static int vbg_ioctl_hgcm_connect(struct vbg_dev *gdev,
+ struct vbg_session *session,
+ struct vbg_ioctl_hgcm_connect *conn)
+{
+ u32 client_id;
+ int i, ret;
+
+ if (vbg_ioctl_chk(&conn->hdr, sizeof(conn->u.in), sizeof(conn->u.out)))
+ return -EINVAL;
+
+	/* Find a free slot in the session's clients array and claim it */
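+	/* A slot value of U32_MAX means claimed but not yet connected. */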
+ mutex_lock(&gdev->session_mutex);
+ for (i = 0; i < ARRAY_SIZE(session->hgcm_client_ids); i++) {
+ if (!session->hgcm_client_ids[i]) {
+ session->hgcm_client_ids[i] = U32_MAX;
+ break;
+ }
+ }
+ mutex_unlock(&gdev->session_mutex);
+
+ if (i >= ARRAY_SIZE(session->hgcm_client_ids))
+ return -EMFILE;
+
+ ret = vbg_hgcm_connect(gdev, &conn->u.in.loc, &client_id,
+ &conn->hdr.rc);
+
+ mutex_lock(&gdev->session_mutex);
+ if (ret == 0 && conn->hdr.rc >= 0) {
+ conn->u.out.client_id = client_id;
+ session->hgcm_client_ids[i] = client_id;
+ } else {
+ conn->u.out.client_id = 0;
+ session->hgcm_client_ids[i] = 0;
+ }
+ mutex_unlock(&gdev->session_mutex);
+
+ return ret;
+}
+
+static int vbg_ioctl_hgcm_disconnect(struct vbg_dev *gdev,
+ struct vbg_session *session,
+ struct vbg_ioctl_hgcm_disconnect *disconn)
+{
+ u32 client_id;
+ int i, ret;
+
+ if (vbg_ioctl_chk(&disconn->hdr, sizeof(disconn->u.in), 0))
+ return -EINVAL;
+
+ client_id = disconn->u.in.client_id;
+ if (client_id == 0 || client_id == U32_MAX)
+ return -EINVAL;
+
+ mutex_lock(&gdev->session_mutex);
+ for (i = 0; i < ARRAY_SIZE(session->hgcm_client_ids); i++) {
+ if (session->hgcm_client_ids[i] == client_id) {
+ session->hgcm_client_ids[i] = U32_MAX;
+ break;
+ }
+ }
+ mutex_unlock(&gdev->session_mutex);
+
+ if (i >= ARRAY_SIZE(session->hgcm_client_ids))
+ return -EINVAL;
+
+ ret = vbg_hgcm_disconnect(gdev, client_id, &disconn->hdr.rc);
+
+ mutex_lock(&gdev->session_mutex);
+ if (ret == 0 && disconn->hdr.rc >= 0)
+ session->hgcm_client_ids[i] = 0;
+ else
+ session->hgcm_client_ids[i] = client_id;
+ mutex_unlock(&gdev->session_mutex);
+
+ return ret;
+}
+
+static int vbg_ioctl_hgcm_call(struct vbg_dev *gdev,
+ struct vbg_session *session, bool f32bit,
+ struct vbg_ioctl_hgcm_call *call)
+{
+ size_t actual_size;
+ u32 client_id;
+ int i, ret;
+
+ if (call->hdr.size_in < sizeof(*call))
+ return -EINVAL;
+
+ if (call->hdr.size_in != call->hdr.size_out)
+ return -EINVAL;
+
+ if (call->parm_count > VMMDEV_HGCM_MAX_PARMS)
+ return -E2BIG;
+
+ client_id = call->client_id;
+ if (client_id == 0 || client_id == U32_MAX)
+ return -EINVAL;
+
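+	/* 32-bit clients pass smaller parameter structs, hence two sizes. */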
+ actual_size = sizeof(*call);
+ if (f32bit)
+ actual_size += call->parm_count *
+ sizeof(struct vmmdev_hgcm_function_parameter32);
+ else
+ actual_size += call->parm_count *
+ sizeof(struct vmmdev_hgcm_function_parameter);
+ if (call->hdr.size_in < actual_size) {
+ vbg_debug("VBG_IOCTL_HGCM_CALL: hdr.size_in %d required size is %zd\n",
+ call->hdr.size_in, actual_size);
+ return -EINVAL;
+ }
+ call->hdr.size_out = actual_size;
+
+ /*
+ * Validate the client id.
+ */
+ mutex_lock(&gdev->session_mutex);
+ for (i = 0; i < ARRAY_SIZE(session->hgcm_client_ids); i++)
+ if (session->hgcm_client_ids[i] == client_id)
+ break;
+ mutex_unlock(&gdev->session_mutex);
+ if (i >= ARRAY_SIZE(session->hgcm_client_ids)) {
+ vbg_debug("VBG_IOCTL_HGCM_CALL: INVALID handle. u32Client=%#08x\n",
+ client_id);
+ return -EINVAL;
+ }
+
+ if (f32bit)
+ ret = vbg_hgcm_call32(gdev, client_id,
+ call->function, call->timeout_ms,
+ VBG_IOCTL_HGCM_CALL_PARMS32(call),
+ call->parm_count, &call->hdr.rc);
+ else
+ ret = vbg_hgcm_call(gdev, client_id,
+ call->function, call->timeout_ms,
+ VBG_IOCTL_HGCM_CALL_PARMS(call),
+ call->parm_count, &call->hdr.rc);
+
+ if (ret == -E2BIG) {
+ /* E2BIG needs to be reported through the hdr.rc field. */
+ call->hdr.rc = VERR_OUT_OF_RANGE;
+ ret = 0;
+ }
+
+ if (ret && ret != -EINTR && ret != -ETIMEDOUT)
+ vbg_err("VBG_IOCTL_HGCM_CALL error: %d\n", ret);
+
+ return ret;
+}
+
+static int vbg_ioctl_log(struct vbg_ioctl_log *log)
+{
+ if (log->hdr.size_out != sizeof(log->hdr))
+ return -EINVAL;
+
+ vbg_info("%.*s", (int)(log->hdr.size_in - sizeof(log->hdr)),
+ log->u.in.msg);
+
+ return 0;
+}
+
+static int vbg_ioctl_change_filter_mask(struct vbg_dev *gdev,
+ struct vbg_session *session,
+ struct vbg_ioctl_change_filter *filter)
+{
+ u32 or_mask, not_mask;
+
+ if (vbg_ioctl_chk(&filter->hdr, sizeof(filter->u.in), 0))
+ return -EINVAL;
+
+ or_mask = filter->u.in.or_mask;
+ not_mask = filter->u.in.not_mask;
+
+ if ((or_mask | not_mask) & ~VMMDEV_EVENT_VALID_EVENT_MASK)
+ return -EINVAL;
+
+ return vbg_set_session_event_filter(gdev, session, or_mask, not_mask,
+ false);
+}
+
+static int vbg_ioctl_change_guest_capabilities(struct vbg_dev *gdev,
+ struct vbg_session *session, struct vbg_ioctl_set_guest_caps *caps)
+{
+ u32 or_mask, not_mask;
+ int ret;
+
+ if (vbg_ioctl_chk(&caps->hdr, sizeof(caps->u.in), sizeof(caps->u.out)))
+ return -EINVAL;
+
+ or_mask = caps->u.in.or_mask;
+ not_mask = caps->u.in.not_mask;
+
+ if ((or_mask | not_mask) & ~VMMDEV_EVENT_VALID_EVENT_MASK)
+ return -EINVAL;
+
+ ret = vbg_set_session_capabilities(gdev, session, or_mask, not_mask,
+ false);
+ if (ret)
+ return ret;
+
+ caps->u.out.session_caps = session->guest_caps;
+ caps->u.out.global_caps = gdev->guest_caps_host;
+
+ return 0;
+}
+
+static int vbg_ioctl_check_balloon(struct vbg_dev *gdev,
+ struct vbg_ioctl_check_balloon *balloon_info)
+{
+ if (vbg_ioctl_chk(&balloon_info->hdr, 0, sizeof(balloon_info->u.out)))
+ return -EINVAL;
+
+ balloon_info->u.out.balloon_chunks = gdev->mem_balloon.chunks;
+ /*
+ * Under Linux we handle VMMDEV_EVENT_BALLOON_CHANGE_REQUEST
+ * events entirely in the kernel, see vbg_core_isr().
+ */
+ balloon_info->u.out.handle_in_r3 = false;
+
+ return 0;
+}
+
+static int vbg_ioctl_write_core_dump(struct vbg_dev *gdev,
+ struct vbg_ioctl_write_coredump *dump)
+{
+ struct vmmdev_write_core_dump *req;
+
+ if (vbg_ioctl_chk(&dump->hdr, sizeof(dump->u.in), 0))
+ return -EINVAL;
+
+ req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_WRITE_COREDUMP);
+ if (!req)
+ return -ENOMEM;
+
+ req->flags = dump->u.in.flags;
+ dump->hdr.rc = vbg_req_perform(gdev, req);
+
+ kfree(req);
+ return 0;
+}
+
+/**
+ * Common IOCtl for user to kernel communication.
+ * Return: 0 or negative errno value.
+ * @session: The client session.
+ * @req: The requested function.
+ * @data: The i/o data buffer, minimum size sizeof(struct vbg_ioctl_hdr).
+ */
+int vbg_core_ioctl(struct vbg_session *session, unsigned int req, void *data)
+{
+ unsigned int req_no_size = req & ~IOCSIZE_MASK;
+ struct vbg_dev *gdev = session->gdev;
+ struct vbg_ioctl_hdr *hdr = data;
+ bool f32bit = false;
+
+ hdr->rc = VINF_SUCCESS;
+ if (!hdr->size_out)
+ hdr->size_out = hdr->size_in;
+
+ /*
+ * hdr->version and hdr->size_in / hdr->size_out minimum size are
+ * already checked by vbg_misc_device_ioctl().
+ */
+
+ /* For VMMDEV_REQUEST hdr->type != VBG_IOCTL_HDR_TYPE_DEFAULT */
+ if (req_no_size == VBG_IOCTL_VMMDEV_REQUEST(0) ||
+ req == VBG_IOCTL_VMMDEV_REQUEST_BIG)
+ return vbg_ioctl_vmmrequest(gdev, session, data);
+
+ if (hdr->type != VBG_IOCTL_HDR_TYPE_DEFAULT)
+ return -EINVAL;
+
+ /* Fixed size requests. */
+ switch (req) {
+ case VBG_IOCTL_DRIVER_VERSION_INFO:
+ return vbg_ioctl_driver_version_info(data);
+ case VBG_IOCTL_HGCM_CONNECT:
+ return vbg_ioctl_hgcm_connect(gdev, session, data);
+ case VBG_IOCTL_HGCM_DISCONNECT:
+ return vbg_ioctl_hgcm_disconnect(gdev, session, data);
+ case VBG_IOCTL_WAIT_FOR_EVENTS:
+ return vbg_ioctl_wait_for_events(gdev, session, data);
+ case VBG_IOCTL_INTERRUPT_ALL_WAIT_FOR_EVENTS:
+ return vbg_ioctl_interrupt_all_wait_events(gdev, session, data);
+ case VBG_IOCTL_CHANGE_FILTER_MASK:
+ return vbg_ioctl_change_filter_mask(gdev, session, data);
+ case VBG_IOCTL_CHANGE_GUEST_CAPABILITIES:
+ return vbg_ioctl_change_guest_capabilities(gdev, session, data);
+ case VBG_IOCTL_CHECK_BALLOON:
+ return vbg_ioctl_check_balloon(gdev, data);
+ case VBG_IOCTL_WRITE_CORE_DUMP:
+ return vbg_ioctl_write_core_dump(gdev, data);
+ }
+
+ /* Variable sized requests. */
+ switch (req_no_size) {
+#ifdef CONFIG_COMPAT
+ case VBG_IOCTL_HGCM_CALL_32(0):
+ f32bit = true;
+ /* Fall through */
+#endif
+ case VBG_IOCTL_HGCM_CALL(0):
+ return vbg_ioctl_hgcm_call(gdev, session, f32bit, data);
+ case VBG_IOCTL_LOG(0):
+ return vbg_ioctl_log(data);
+ }
+
+ vbg_debug("VGDrvCommonIoCtl: Unknown req %#08x\n", req);
+ return -ENOTTY;
+}
+
+/**
+ * Report guest supported mouse-features to the host.
+ *
+ * Return: 0 or negative errno value.
+ * @gdev: The Guest extension device.
+ * @features: The set of features to report to the host.
+ */
+int vbg_core_set_mouse_status(struct vbg_dev *gdev, u32 features)
+{
+ struct vmmdev_mouse_status *req;
+ int rc;
+
+ req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_MOUSE_STATUS);
+ if (!req)
+ return -ENOMEM;
+
+ req->mouse_features = features;
+ req->pointer_pos_x = 0;
+ req->pointer_pos_y = 0;
+
+ rc = vbg_req_perform(gdev, req);
+ if (rc < 0)
+ vbg_err("%s error, rc: %d\n", __func__, rc);
+
+ kfree(req);
+ return vbg_status_code_to_errno(rc);
+}
+
+/** Core interrupt service routine. */
+irqreturn_t vbg_core_isr(int irq, void *dev_id)
+{
+ struct vbg_dev *gdev = dev_id;
+ struct vmmdev_events *req = gdev->ack_events_req;
+ bool mouse_position_changed = false;
+ unsigned long flags;
+ u32 events = 0;
+ int rc;
+
+ if (!gdev->mmio->V.V1_04.have_events)
+ return IRQ_NONE;
+
+	/* Get and acknowledge events. */
+ req->header.rc = VERR_INTERNAL_ERROR;
+ req->events = 0;
+ rc = vbg_req_perform(gdev, req);
+ if (rc < 0) {
+ vbg_err("Error performing events req, rc: %d\n", rc);
+ return IRQ_NONE;
+ }
+
+ events = req->events;
+
+ if (events & VMMDEV_EVENT_MOUSE_POSITION_CHANGED) {
+ mouse_position_changed = true;
+ events &= ~VMMDEV_EVENT_MOUSE_POSITION_CHANGED;
+ }
+
+ if (events & VMMDEV_EVENT_HGCM) {
+ wake_up(&gdev->hgcm_wq);
+ events &= ~VMMDEV_EVENT_HGCM;
+ }
+
+ if (events & VMMDEV_EVENT_BALLOON_CHANGE_REQUEST) {
+ schedule_work(&gdev->mem_balloon.work);
+ events &= ~VMMDEV_EVENT_BALLOON_CHANGE_REQUEST;
+ }
+
+ if (events) {
+ spin_lock_irqsave(&gdev->event_spinlock, flags);
+ gdev->pending_events |= events;
+ spin_unlock_irqrestore(&gdev->event_spinlock, flags);
+
+ wake_up(&gdev->event_wq);
+ }
+
+ if (mouse_position_changed)
+ vbg_linux_mouse_event(gdev);
+
+ return IRQ_HANDLED;
+}
diff --git a/drivers/virt/vboxguest/vboxguest_core.h b/drivers/virt/vboxguest/vboxguest_core.h
new file mode 100644
index 000000000000..6c784bf4fa6d
--- /dev/null
+++ b/drivers/virt/vboxguest/vboxguest_core.h
@@ -0,0 +1,174 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR CDDL-1.0) */
+/* Copyright (C) 2010-2016 Oracle Corporation */
+
+#ifndef __VBOXGUEST_CORE_H__
+#define __VBOXGUEST_CORE_H__
+
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/miscdevice.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+#include <linux/vboxguest.h>
+#include "vmmdev.h"
+
+struct vbg_session;
+
+/** VBox guest memory balloon. */
+struct vbg_mem_balloon {
+ /** Work handling VMMDEV_EVENT_BALLOON_CHANGE_REQUEST events */
+ struct work_struct work;
+ /** Pre-allocated vmmdev_memballoon_info req for query */
+ struct vmmdev_memballoon_info *get_req;
+ /** Pre-allocated vmmdev_memballoon_change req for inflate / deflate */
+ struct vmmdev_memballoon_change *change_req;
+ /** The current number of chunks in the balloon. */
+ u32 chunks;
+ /** The maximum number of chunks in the balloon. */
+ u32 max_chunks;
+ /**
+ * Array of pointers to page arrays. A page * array is allocated for
+	 * each chunk when inflating, and freed when deflating.
+ */
+ struct page ***pages;
+};
+
+/**
+ * Per bit usage tracker for a u32 mask.
+ *
+ * Used for optimal handling of the guest capabilities and event filter.
+ */
+struct vbg_bit_usage_tracker {
+ /** Per bit usage counters. */
+ u32 per_bit_usage[32];
+ /** The current mask according to per_bit_usage. */
+ u32 mask;
+};
+
+/** VBox guest device (data) extension. */
+struct vbg_dev {
+ struct device *dev;
+ /** The base of the adapter I/O ports. */
+ u16 io_port;
+ /** Pointer to the mapping of the VMMDev adapter memory. */
+ struct vmmdev_memory *mmio;
+ /** Host version */
+ char host_version[64];
+ /** Host features */
+ unsigned int host_features;
+ /**
+ * Dummy page and vmap address for reserved kernel virtual-address
+ * space for the guest mappings, only used on hosts lacking vtx.
+ */
+ struct page *guest_mappings_dummy_page;
+ void *guest_mappings;
+ /** Spinlock protecting pending_events. */
+ spinlock_t event_spinlock;
+ /** Preallocated struct vmmdev_events for the IRQ handler. */
+ struct vmmdev_events *ack_events_req;
+ /** Wait-for-event list for threads waiting for multiple events. */
+ wait_queue_head_t event_wq;
+ /** Mask of pending events. */
+ u32 pending_events;
+ /** Wait-for-event list for threads waiting on HGCM async completion. */
+ wait_queue_head_t hgcm_wq;
+ /** Pre-allocated hgcm cancel2 req. for cancellation on timeout */
+ struct vmmdev_hgcm_cancel2 *cancel_req;
+ /** Mutex protecting cancel_req accesses */
+ struct mutex cancel_req_mutex;
+ /** Pre-allocated mouse-status request for the input-device handling. */
+ struct vmmdev_mouse_status *mouse_status_req;
+ /** Input device for reporting abs mouse coordinates to the guest. */
+ struct input_dev *input;
+
+ /** Memory balloon information. */
+ struct vbg_mem_balloon mem_balloon;
+
+ /** Lock for session related items in vbg_dev and vbg_session */
+ struct mutex session_mutex;
+ /** Events we won't permit anyone to filter out. */
+ u32 fixed_events;
+ /**
+ * Usage counters for the host events (excludes fixed events),
+ * Protected by session_mutex.
+ */
+ struct vbg_bit_usage_tracker event_filter_tracker;
+ /**
+ * The event filter last reported to the host (or UINT32_MAX).
+ * Protected by session_mutex.
+ */
+ u32 event_filter_host;
+
+ /**
+ * Usage counters for guest capabilities. Indexed by capability bit
+ * number, one count per session using a capability.
+ * Protected by session_mutex.
+ */
+ struct vbg_bit_usage_tracker guest_caps_tracker;
+ /**
+ * The guest capabilities last reported to the host (or UINT32_MAX).
+ * Protected by session_mutex.
+ */
+ u32 guest_caps_host;
+
+ /**
+	 * Heartbeat timer which fires every heartbeat_interval_ms
+	 * milliseconds; its handler sends VMMDEVREQ_GUEST_HEARTBEAT
+	 * to the VMMDev.
+ */
+ struct timer_list heartbeat_timer;
+ /** Heartbeat timer interval in ms. */
+ int heartbeat_interval_ms;
+ /** Preallocated VMMDEVREQ_GUEST_HEARTBEAT request. */
+ struct vmmdev_request_header *guest_heartbeat_req;
+
+ /** "vboxguest" char-device */
+ struct miscdevice misc_device;
+ /** "vboxuser" char-device */
+ struct miscdevice misc_device_user;
+};
+
+/** The VBoxGuest per session data. */
+struct vbg_session {
+ /** Pointer to the device extension. */
+ struct vbg_dev *gdev;
+
+ /**
+ * Array containing HGCM client IDs associated with this session.
+ * These will be automatically disconnected when the session is closed.
+	 * Protected by vbg_dev.session_mutex.
+ */
+ u32 hgcm_client_ids[64];
+ /**
+ * Host events requested by the session.
+ * An event type requested in any guest session will be added to the
+	 * host filter. Protected by vbg_dev.session_mutex.
+ */
+ u32 event_filter;
+ /**
+ * Guest capabilities for this session.
+ * A capability claimed by any guest session will be reported to the
+	 * host. Protected by vbg_dev.session_mutex.
+ */
+ u32 guest_caps;
+ /** Does this session belong to a root process or a user one? */
+ bool user_session;
+	/** Set on CANCEL_ALL_WAITEVENTS, protected by vbg_dev.event_spinlock. */
+ bool cancel_waiters;
+};
+
+int vbg_core_init(struct vbg_dev *gdev, u32 fixed_events);
+void vbg_core_exit(struct vbg_dev *gdev);
+struct vbg_session *vbg_core_open_session(struct vbg_dev *gdev, bool user);
+void vbg_core_close_session(struct vbg_session *session);
+int vbg_core_ioctl(struct vbg_session *session, unsigned int req, void *data);
+int vbg_core_set_mouse_status(struct vbg_dev *gdev, u32 features);
+
+irqreturn_t vbg_core_isr(int irq, void *dev_id);
+
+void vbg_linux_mouse_event(struct vbg_dev *gdev);
+
+#endif
diff --git a/drivers/virt/vboxguest/vboxguest_linux.c b/drivers/virt/vboxguest/vboxguest_linux.c
new file mode 100644
index 000000000000..82e280d38cc2
--- /dev/null
+++ b/drivers/virt/vboxguest/vboxguest_linux.c
@@ -0,0 +1,466 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * vboxguest linux pci driver, char-dev and input-device code.
+ *
+ * Copyright (C) 2006-2016 Oracle Corporation
+ */
+
+#include <linux/input.h>
+#include <linux/kernel.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/poll.h>
+#include <linux/vbox_utils.h>
+#include "vboxguest_core.h"
+
+/** The device name. */
+#define DEVICE_NAME "vboxguest"
+/** The device name for the device node open to everyone. */
+#define DEVICE_NAME_USER "vboxuser"
+/** VirtualBox PCI vendor ID. */
+#define VBOX_VENDORID 0x80ee
+/** VMMDev PCI card product ID. */
+#define VMMDEV_DEVICEID 0xcafe
+
+/** Mutex protecting the global vbg_gdev pointer used by vbg_get/put_gdev. */
+static DEFINE_MUTEX(vbg_gdev_mutex);
+/** Global vbg_gdev pointer used by vbg_get/put_gdev. */
+static struct vbg_dev *vbg_gdev;
+
+static int vbg_misc_device_open(struct inode *inode, struct file *filp)
+{
+ struct vbg_session *session;
+ struct vbg_dev *gdev;
+
+ /* misc_open sets filp->private_data to our misc device */
+ gdev = container_of(filp->private_data, struct vbg_dev, misc_device);
+
+ session = vbg_core_open_session(gdev, false);
+ if (IS_ERR(session))
+ return PTR_ERR(session);
+
+ filp->private_data = session;
+ return 0;
+}
+
+static int vbg_misc_device_user_open(struct inode *inode, struct file *filp)
+{
+ struct vbg_session *session;
+ struct vbg_dev *gdev;
+
+ /* misc_open sets filp->private_data to our misc device */
+ gdev = container_of(filp->private_data, struct vbg_dev,
+ misc_device_user);
+
+	/* Sessions opened on the vboxuser device are restricted user sessions */
+	session = vbg_core_open_session(gdev, true);
+ if (IS_ERR(session))
+ return PTR_ERR(session);
+
+ filp->private_data = session;
+ return 0;
+}
+
+/**
+ * Close device.
+ * Return: 0 on success, negated errno on failure.
+ * @inode: Pointer to inode info structure.
+ * @filp: Associated file pointer.
+ */
+static int vbg_misc_device_close(struct inode *inode, struct file *filp)
+{
+ vbg_core_close_session(filp->private_data);
+ filp->private_data = NULL;
+ return 0;
+}
+
+/**
+ * Device I/O Control entry point.
+ * Return: 0 on success, negated errno on failure.
+ * @filp: Associated file pointer.
+ * @req: The request specified to ioctl().
+ * @arg: The argument specified to ioctl().
+ */
+static long vbg_misc_device_ioctl(struct file *filp, unsigned int req,
+ unsigned long arg)
+{
+ struct vbg_session *session = filp->private_data;
+ size_t returned_size, size;
+ struct vbg_ioctl_hdr hdr;
+ int ret = 0;
+ void *buf;
+
+	if (copy_from_user(&hdr, (void __user *)arg, sizeof(hdr)))
+ return -EFAULT;
+
+ if (hdr.version != VBG_IOCTL_HDR_VERSION)
+ return -EINVAL;
+
+ if (hdr.size_in < sizeof(hdr) ||
+ (hdr.size_out && hdr.size_out < sizeof(hdr)))
+ return -EINVAL;
+
+ size = max(hdr.size_in, hdr.size_out);
+ if (_IOC_SIZE(req) && _IOC_SIZE(req) != size)
+ return -EINVAL;
+ if (size > SZ_16M)
+ return -E2BIG;
+
+	/* __GFP_DMA32 because VBG_IOCTL_VMMDEV_REQUEST passes this to the host */
+ buf = kmalloc(size, GFP_KERNEL | __GFP_DMA32);
+ if (!buf)
+ return -ENOMEM;
+
+	if (copy_from_user(buf, (void __user *)arg, hdr.size_in)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ if (hdr.size_in < size)
+ memset(buf + hdr.size_in, 0, size - hdr.size_in);
+
+ ret = vbg_core_ioctl(session, req, buf);
+ if (ret)
+ goto out;
+
+ returned_size = ((struct vbg_ioctl_hdr *)buf)->size_out;
+ if (returned_size > size) {
+ vbg_debug("%s: too much output data %zu > %zu\n",
+ __func__, returned_size, size);
+ returned_size = size;
+ }
+	if (copy_to_user((void __user *)arg, buf, returned_size) != 0)
+ ret = -EFAULT;
+
+out:
+ kfree(buf);
+
+ return ret;
+}
+
+/** The file_operations structures. */
+static const struct file_operations vbg_misc_device_fops = {
+ .owner = THIS_MODULE,
+ .open = vbg_misc_device_open,
+ .release = vbg_misc_device_close,
+ .unlocked_ioctl = vbg_misc_device_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = vbg_misc_device_ioctl,
+#endif
+};
+static const struct file_operations vbg_misc_device_user_fops = {
+ .owner = THIS_MODULE,
+ .open = vbg_misc_device_user_open,
+ .release = vbg_misc_device_close,
+ .unlocked_ioctl = vbg_misc_device_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = vbg_misc_device_ioctl,
+#endif
+};
+
+/**
+ * Called when the input device is first opened.
+ *
+ * Sets up absolute mouse reporting.
+ */
+static int vbg_input_open(struct input_dev *input)
+{
+ struct vbg_dev *gdev = input_get_drvdata(input);
+ u32 feat = VMMDEV_MOUSE_GUEST_CAN_ABSOLUTE | VMMDEV_MOUSE_NEW_PROTOCOL;
+
+	return vbg_core_set_mouse_status(gdev, feat);
+}
+
+/**
+ * Called if all open handles to the input device are closed.
+ *
+ * Disables absolute reporting.
+ */
+static void vbg_input_close(struct input_dev *input)
+{
+ struct vbg_dev *gdev = input_get_drvdata(input);
+
+ vbg_core_set_mouse_status(gdev, 0);
+}
+
+/**
+ * Creates the kernel input device.
+ *
+ * Return: 0 on success, negated errno on failure.
+ */
+static int vbg_create_input_device(struct vbg_dev *gdev)
+{
+ struct input_dev *input;
+
+ input = devm_input_allocate_device(gdev->dev);
+ if (!input)
+ return -ENOMEM;
+
+ input->id.bustype = BUS_PCI;
+ input->id.vendor = VBOX_VENDORID;
+ input->id.product = VMMDEV_DEVICEID;
+ input->open = vbg_input_open;
+ input->close = vbg_input_close;
+ input->dev.parent = gdev->dev;
+ input->name = "VirtualBox mouse integration";
+
+ input_set_abs_params(input, ABS_X, VMMDEV_MOUSE_RANGE_MIN,
+ VMMDEV_MOUSE_RANGE_MAX, 0, 0);
+ input_set_abs_params(input, ABS_Y, VMMDEV_MOUSE_RANGE_MIN,
+ VMMDEV_MOUSE_RANGE_MAX, 0, 0);
+ input_set_capability(input, EV_KEY, BTN_MOUSE);
+ input_set_drvdata(input, gdev);
+
+ gdev->input = input;
+
+ return input_register_device(gdev->input);
+}
+
+static ssize_t host_version_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct vbg_dev *gdev = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%s\n", gdev->host_version);
+}
+
+static ssize_t host_features_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct vbg_dev *gdev = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%#x\n", gdev->host_features);
+}
+
+static DEVICE_ATTR_RO(host_version);
+static DEVICE_ATTR_RO(host_features);
+
+/**
+ * Does the PCI detection and init of the device.
+ *
+ * Return: 0 on success, negated errno on failure.
+ */
+static int vbg_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
+{
+ struct device *dev = &pci->dev;
+ resource_size_t io, io_len, mmio, mmio_len;
+ struct vmmdev_memory *vmmdev;
+ struct vbg_dev *gdev;
+ int ret;
+
+ gdev = devm_kzalloc(dev, sizeof(*gdev), GFP_KERNEL);
+ if (!gdev)
+ return -ENOMEM;
+
+ ret = pci_enable_device(pci);
+ if (ret != 0) {
+ vbg_err("vboxguest: Error enabling device: %d\n", ret);
+ return ret;
+ }
+
+ ret = -ENODEV;
+
+ io = pci_resource_start(pci, 0);
+ io_len = pci_resource_len(pci, 0);
+ if (!io || !io_len) {
+ vbg_err("vboxguest: Error IO-port resource (0) is missing\n");
+ goto err_disable_pcidev;
+ }
+ if (devm_request_region(dev, io, io_len, DEVICE_NAME) == NULL) {
+ vbg_err("vboxguest: Error could not claim IO resource\n");
+ ret = -EBUSY;
+ goto err_disable_pcidev;
+ }
+
+ mmio = pci_resource_start(pci, 1);
+ mmio_len = pci_resource_len(pci, 1);
+ if (!mmio || !mmio_len) {
+ vbg_err("vboxguest: Error MMIO resource (1) is missing\n");
+ goto err_disable_pcidev;
+ }
+
+ if (devm_request_mem_region(dev, mmio, mmio_len, DEVICE_NAME) == NULL) {
+ vbg_err("vboxguest: Error could not claim MMIO resource\n");
+ ret = -EBUSY;
+ goto err_disable_pcidev;
+ }
+
+ vmmdev = devm_ioremap(dev, mmio, mmio_len);
+ if (!vmmdev) {
+ vbg_err("vboxguest: Error ioremap failed; MMIO addr=%pap size=%pap\n",
+ &mmio, &mmio_len);
+ goto err_disable_pcidev;
+ }
+
+ /* Validate MMIO region version and size. */
+ if (vmmdev->version != VMMDEV_MEMORY_VERSION ||
+ vmmdev->size < 32 || vmmdev->size > mmio_len) {
+ vbg_err("vboxguest: Bogus VMMDev memory; version=%08x (expected %08x) size=%d (expected <= %d)\n",
+ vmmdev->version, VMMDEV_MEMORY_VERSION,
+ vmmdev->size, (int)mmio_len);
+ goto err_disable_pcidev;
+ }
+
+ gdev->io_port = io;
+ gdev->mmio = vmmdev;
+ gdev->dev = dev;
+ gdev->misc_device.minor = MISC_DYNAMIC_MINOR;
+ gdev->misc_device.name = DEVICE_NAME;
+ gdev->misc_device.fops = &vbg_misc_device_fops;
+ gdev->misc_device_user.minor = MISC_DYNAMIC_MINOR;
+ gdev->misc_device_user.name = DEVICE_NAME_USER;
+ gdev->misc_device_user.fops = &vbg_misc_device_user_fops;
+
+ ret = vbg_core_init(gdev, VMMDEV_EVENT_MOUSE_POSITION_CHANGED);
+ if (ret)
+ goto err_disable_pcidev;
+
+ ret = vbg_create_input_device(gdev);
+ if (ret) {
+ vbg_err("vboxguest: Error creating input device: %d\n", ret);
+ goto err_vbg_core_exit;
+ }
+
+ ret = devm_request_irq(dev, pci->irq, vbg_core_isr, IRQF_SHARED,
+ DEVICE_NAME, gdev);
+ if (ret) {
+ vbg_err("vboxguest: Error requesting irq: %d\n", ret);
+ goto err_vbg_core_exit;
+ }
+
+ ret = misc_register(&gdev->misc_device);
+ if (ret) {
+ vbg_err("vboxguest: Error misc_register %s failed: %d\n",
+ DEVICE_NAME, ret);
+ goto err_vbg_core_exit;
+ }
+
+ ret = misc_register(&gdev->misc_device_user);
+ if (ret) {
+ vbg_err("vboxguest: Error misc_register %s failed: %d\n",
+ DEVICE_NAME_USER, ret);
+ goto err_unregister_misc_device;
+ }
+
+ mutex_lock(&vbg_gdev_mutex);
+ if (!vbg_gdev)
+ vbg_gdev = gdev;
+ else
+ ret = -EBUSY;
+ mutex_unlock(&vbg_gdev_mutex);
+
+ if (ret) {
+ vbg_err("vboxguest: Error more then 1 vbox guest pci device\n");
+ goto err_unregister_misc_device_user;
+ }
+
+ pci_set_drvdata(pci, gdev);
+ device_create_file(dev, &dev_attr_host_version);
+ device_create_file(dev, &dev_attr_host_features);
+
+ vbg_info("vboxguest: misc device minor %d, IRQ %d, I/O port %x, MMIO at %pap (size %pap)\n",
+ gdev->misc_device.minor, pci->irq, gdev->io_port,
+ &mmio, &mmio_len);
+
+ return 0;
+
+err_unregister_misc_device_user:
+ misc_deregister(&gdev->misc_device_user);
+err_unregister_misc_device:
+ misc_deregister(&gdev->misc_device);
+err_vbg_core_exit:
+ vbg_core_exit(gdev);
+err_disable_pcidev:
+ pci_disable_device(pci);
+
+ return ret;
+}
+
+static void vbg_pci_remove(struct pci_dev *pci)
+{
+ struct vbg_dev *gdev = pci_get_drvdata(pci);
+
+ mutex_lock(&vbg_gdev_mutex);
+ vbg_gdev = NULL;
+ mutex_unlock(&vbg_gdev_mutex);
+
+ device_remove_file(gdev->dev, &dev_attr_host_features);
+ device_remove_file(gdev->dev, &dev_attr_host_version);
+ misc_deregister(&gdev->misc_device_user);
+ misc_deregister(&gdev->misc_device);
+ vbg_core_exit(gdev);
+ pci_disable_device(pci);
+}
+
+struct vbg_dev *vbg_get_gdev(void)
+{
+ mutex_lock(&vbg_gdev_mutex);
+
+ /*
+	 * Note: on success we keep the mutex locked until vbg_put_gdev();
+ * this stops vbg_pci_remove from removing the device from underneath
+ * vboxsf. vboxsf will only hold a reference for a short while.
+ */
+ if (vbg_gdev)
+ return vbg_gdev;
+
+ mutex_unlock(&vbg_gdev_mutex);
+ return ERR_PTR(-ENODEV);
+}
+EXPORT_SYMBOL(vbg_get_gdev);
+
+void vbg_put_gdev(struct vbg_dev *gdev)
+{
+ WARN_ON(gdev != vbg_gdev);
+ mutex_unlock(&vbg_gdev_mutex);
+}
+EXPORT_SYMBOL(vbg_put_gdev);
+
+/**
+ * Callback for mouse events.
+ *
+ * This is called at the end of the ISR, after leaving the event spinlock, if
+ * VMMDEV_EVENT_MOUSE_POSITION_CHANGED was raised by the host.
+ *
+ * @gdev: The device extension.
+ */
+void vbg_linux_mouse_event(struct vbg_dev *gdev)
+{
+ int rc;
+
+ /* Report events to the kernel input device */
+ gdev->mouse_status_req->mouse_features = 0;
+ gdev->mouse_status_req->pointer_pos_x = 0;
+ gdev->mouse_status_req->pointer_pos_y = 0;
+ rc = vbg_req_perform(gdev, gdev->mouse_status_req);
+ if (rc >= 0) {
+ input_report_abs(gdev->input, ABS_X,
+ gdev->mouse_status_req->pointer_pos_x);
+ input_report_abs(gdev->input, ABS_Y,
+ gdev->mouse_status_req->pointer_pos_y);
+ input_sync(gdev->input);
+ }
+}
+
+static const struct pci_device_id vbg_pci_ids[] = {
+ { .vendor = VBOX_VENDORID, .device = VMMDEV_DEVICEID },
+ {}
+};
+MODULE_DEVICE_TABLE(pci, vbg_pci_ids);
+
+static struct pci_driver vbg_pci_driver = {
+ .name = DEVICE_NAME,
+ .id_table = vbg_pci_ids,
+ .probe = vbg_pci_probe,
+ .remove = vbg_pci_remove,
+};
+
+module_pci_driver(vbg_pci_driver);
+
+MODULE_AUTHOR("Oracle Corporation");
+MODULE_DESCRIPTION("Oracle VM VirtualBox Guest Additions for Linux Module");
+MODULE_LICENSE("GPL");
diff --git a/drivers/virt/vboxguest/vboxguest_utils.c b/drivers/virt/vboxguest/vboxguest_utils.c
new file mode 100644
index 000000000000..0f0dab8023cf
--- /dev/null
+++ b/drivers/virt/vboxguest/vboxguest_utils.c
@@ -0,0 +1,803 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR CDDL-1.0) */
+/*
+ * vboxguest vmm-req and hgcm-call code, VBoxGuestR0LibHGCMInternal.cpp,
+ * VBoxGuestR0LibGenericRequest.cpp and RTErrConvertToErrno.cpp in vbox svn.
+ *
+ * Copyright (C) 2006-2016 Oracle Corporation
+ */
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
+#include <linux/vbox_err.h>
+#include <linux/vbox_utils.h>
+#include "vboxguest_core.h"
+
+/* Get the pointer to the first parameter of a HGCM call request. */
+#define VMMDEV_HGCM_CALL_PARMS(a) \
+ ((struct vmmdev_hgcm_function_parameter *)( \
+ (u8 *)(a) + sizeof(struct vmmdev_hgcm_call)))
+
+/* The max parameter buffer size for a user request. */
+#define VBG_MAX_HGCM_USER_PARM (24 * SZ_1M)
+/* The max parameter buffer size for a kernel request. */
+#define VBG_MAX_HGCM_KERNEL_PARM (16 * SZ_1M)
+
+#define VBG_DEBUG_PORT 0x504
+
+/* This protects vbg_log_buf and serializes VBG_DEBUG_PORT accesses */
+static DEFINE_SPINLOCK(vbg_log_lock);
+static char vbg_log_buf[128];
+
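+/*
+ * Generates log functions which write each message both to the kernel log
+ * and, byte by byte, to the VirtualBox debug I/O port (VBG_DEBUG_PORT).
+ */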
+#define VBG_LOG(name, pr_func) \
+void name(const char *fmt, ...) \
+{ \
+ unsigned long flags; \
+ va_list args; \
+ int i, count; \
+ \
+ va_start(args, fmt); \
+ spin_lock_irqsave(&vbg_log_lock, flags); \
+ \
+ count = vscnprintf(vbg_log_buf, sizeof(vbg_log_buf), fmt, args);\
+ for (i = 0; i < count; i++) \
+ outb(vbg_log_buf[i], VBG_DEBUG_PORT); \
+ \
+ pr_func("%s", vbg_log_buf); \
+ \
+ spin_unlock_irqrestore(&vbg_log_lock, flags); \
+ va_end(args); \
+} \
+EXPORT_SYMBOL(name)
+
+VBG_LOG(vbg_info, pr_info);
+VBG_LOG(vbg_warn, pr_warn);
+VBG_LOG(vbg_err, pr_err);
+#if defined(DEBUG) && !defined(CONFIG_DYNAMIC_DEBUG)
+VBG_LOG(vbg_debug, pr_debug);
+#endif
+
+void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type)
+{
+ struct vmmdev_request_header *req;
+
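+	/*
+	 * The host is handed a 32-bit physical address through outl() in
+	 * vbg_req_perform(), so the request must be allocated below 4G.
+	 */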
+ req = kmalloc(len, GFP_KERNEL | __GFP_DMA32);
+ if (!req)
+ return NULL;
+
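+	/* Poison the buffer so fields the caller forgets to set stand out. */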
+ memset(req, 0xaa, len);
+
+ req->size = len;
+ req->version = VMMDEV_REQUEST_HEADER_VERSION;
+ req->request_type = req_type;
+ req->rc = VERR_GENERAL_FAILURE;
+ req->reserved1 = 0;
+ req->reserved2 = 0;
+
+ return req;
+}
+
+/* Note this function returns a VBox status code, not a negative errno!! */
+int vbg_req_perform(struct vbg_dev *gdev, void *req)
+{
+ unsigned long phys_req = virt_to_phys(req);
+
+ outl(phys_req, gdev->io_port + VMMDEV_PORT_OFF_REQUEST);
+ /*
+ * The host changes the request as a result of the outl, make sure
+ * the outl and any reads of the req happen in the correct order.
+ */
+ mb();
+
+ return ((struct vmmdev_request_header *)req)->rc;
+}
+
+static bool hgcm_req_done(struct vbg_dev *gdev,
+ struct vmmdev_hgcmreq_header *header)
+{
+ unsigned long flags;
+ bool done;
+
+ spin_lock_irqsave(&gdev->event_spinlock, flags);
+ done = header->flags & VMMDEV_HGCM_REQ_DONE;
+ spin_unlock_irqrestore(&gdev->event_spinlock, flags);
+
+ return done;
+}
+
+int vbg_hgcm_connect(struct vbg_dev *gdev,
+ struct vmmdev_hgcm_service_location *loc,
+ u32 *client_id, int *vbox_status)
+{
+ struct vmmdev_hgcm_connect *hgcm_connect = NULL;
+ int rc;
+
+ hgcm_connect = vbg_req_alloc(sizeof(*hgcm_connect),
+ VMMDEVREQ_HGCM_CONNECT);
+ if (!hgcm_connect)
+ return -ENOMEM;
+
+ hgcm_connect->header.flags = 0;
+ memcpy(&hgcm_connect->loc, loc, sizeof(*loc));
+ hgcm_connect->client_id = 0;
+
+ rc = vbg_req_perform(gdev, hgcm_connect);
+
+ if (rc == VINF_HGCM_ASYNC_EXECUTE)
+ wait_event(gdev->hgcm_wq,
+ hgcm_req_done(gdev, &hgcm_connect->header));
+
+ if (rc >= 0) {
+ *client_id = hgcm_connect->client_id;
+ rc = hgcm_connect->header.result;
+ }
+
+ kfree(hgcm_connect);
+
+ *vbox_status = rc;
+ return 0;
+}
+EXPORT_SYMBOL(vbg_hgcm_connect);
+
+int vbg_hgcm_disconnect(struct vbg_dev *gdev, u32 client_id, int *vbox_status)
+{
+ struct vmmdev_hgcm_disconnect *hgcm_disconnect = NULL;
+ int rc;
+
+ hgcm_disconnect = vbg_req_alloc(sizeof(*hgcm_disconnect),
+ VMMDEVREQ_HGCM_DISCONNECT);
+ if (!hgcm_disconnect)
+ return -ENOMEM;
+
+ hgcm_disconnect->header.flags = 0;
+ hgcm_disconnect->client_id = client_id;
+
+ rc = vbg_req_perform(gdev, hgcm_disconnect);
+
+ if (rc == VINF_HGCM_ASYNC_EXECUTE)
+ wait_event(gdev->hgcm_wq,
+ hgcm_req_done(gdev, &hgcm_disconnect->header));
+
+ if (rc >= 0)
+ rc = hgcm_disconnect->header.result;
+
+ kfree(hgcm_disconnect);
+
+ *vbox_status = rc;
+ return 0;
+}
+EXPORT_SYMBOL(vbg_hgcm_disconnect);
+
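+/*
+ * Number of pages spanned by a buffer, taking the offset of the buffer
+ * within its first page into account.
+ */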
+static u32 hgcm_call_buf_size_in_pages(void *buf, u32 len)
+{
+ u32 size = PAGE_ALIGN(len + ((unsigned long)buf & ~PAGE_MASK));
+
+ return size >> PAGE_SHIFT;
+}
+
+static void hgcm_call_add_pagelist_size(void *buf, u32 len, size_t *extra)
+{
+ u32 page_count;
+
+ page_count = hgcm_call_buf_size_in_pages(buf, len);
+ *extra += offsetof(struct vmmdev_hgcm_pagelist, pages[page_count]);
+}
+
+static int hgcm_call_preprocess_linaddr(
+ const struct vmmdev_hgcm_function_parameter *src_parm,
+ void **bounce_buf_ret, size_t *extra)
+{
+ void *buf, *bounce_buf;
+ bool copy_in;
+ u32 len;
+ int ret;
+
+ buf = (void *)src_parm->u.pointer.u.linear_addr;
+ len = src_parm->u.pointer.size;
+ copy_in = src_parm->type != VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT;
+
+ if (len > VBG_MAX_HGCM_USER_PARM)
+ return -E2BIG;
+
+ bounce_buf = kvmalloc(len, GFP_KERNEL);
+ if (!bounce_buf)
+ return -ENOMEM;
+
+	/*
+	 * Store the buffer before the copy; on -EFAULT the caller still
+	 * frees all allocated bounce buffers (see vbg_hgcm_call()), which
+	 * would otherwise leak this one.
+	 */
+	*bounce_buf_ret = bounce_buf;
+
+	if (copy_in) {
+		ret = copy_from_user(bounce_buf, (void __user *)buf, len);
+		if (ret)
+			return -EFAULT;
+	} else {
+		memset(bounce_buf, 0, len);
+	}
+
+ hgcm_call_add_pagelist_size(bounce_buf, len, extra);
+ return 0;
+}
+
+/**
+ * Preprocesses the HGCM call: validates parameters, allocates bounce buffers
+ * and figures out how much extra storage is needed for page lists.
+ * Return: 0 or negative errno value.
+ * @src_parm: Pointer to source function call parameters
+ * @parm_count: Number of function call parameters.
+ * @bounce_bufs_ret: Where to return the allocated bouncebuffer array
+ * @extra: Where to return the extra request space needed for
+ * physical page lists.
+ */
+static int hgcm_call_preprocess(
+ const struct vmmdev_hgcm_function_parameter *src_parm,
+ u32 parm_count, void ***bounce_bufs_ret, size_t *extra)
+{
+ void *buf, **bounce_bufs = NULL;
+ u32 i, len;
+ int ret;
+
+ for (i = 0; i < parm_count; i++, src_parm++) {
+ switch (src_parm->type) {
+ case VMMDEV_HGCM_PARM_TYPE_32BIT:
+ case VMMDEV_HGCM_PARM_TYPE_64BIT:
+ break;
+
+ case VMMDEV_HGCM_PARM_TYPE_LINADDR:
+ case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN:
+ case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT:
+ if (!bounce_bufs) {
+ bounce_bufs = kcalloc(parm_count,
+ sizeof(void *),
+ GFP_KERNEL);
+ if (!bounce_bufs)
+ return -ENOMEM;
+
+ *bounce_bufs_ret = bounce_bufs;
+ }
+
+ ret = hgcm_call_preprocess_linaddr(src_parm,
+ &bounce_bufs[i],
+ extra);
+ if (ret)
+ return ret;
+
+ break;
+
+ case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL:
+ case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN:
+ case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_OUT:
+ buf = (void *)src_parm->u.pointer.u.linear_addr;
+ len = src_parm->u.pointer.size;
+ if (WARN_ON(len > VBG_MAX_HGCM_KERNEL_PARM))
+ return -E2BIG;
+
+ hgcm_call_add_pagelist_size(buf, len, extra);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * Translates linear address types to page list direction flags.
+ *
+ * Return: page list flags.
+ * @type: The type.
+ */
+static u32 hgcm_call_linear_addr_type_to_pagelist_flags(
+ enum vmmdev_hgcm_function_parameter_type type)
+{
+ switch (type) {
+ default:
+ WARN_ON(1);
+ /* Fall through */
+ case VMMDEV_HGCM_PARM_TYPE_LINADDR:
+ case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL:
+ return VMMDEV_HGCM_F_PARM_DIRECTION_BOTH;
+
+ case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN:
+ case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN:
+ return VMMDEV_HGCM_F_PARM_DIRECTION_TO_HOST;
+
+ case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT:
+ case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_OUT:
+ return VMMDEV_HGCM_F_PARM_DIRECTION_FROM_HOST;
+ }
+}
+
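+/*
+ * Converts a linear-address parameter into a page-list parameter; the host
+ * works with physical page lists (hosts without
+ * VMMDEV_HVF_HGCM_PHYS_PAGE_LIST support are rejected in
+ * vbg_query_host_version()).
+ */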
+static void hgcm_call_init_linaddr(struct vmmdev_hgcm_call *call,
+ struct vmmdev_hgcm_function_parameter *dst_parm, void *buf, u32 len,
+ enum vmmdev_hgcm_function_parameter_type type, u32 *off_extra)
+{
+ struct vmmdev_hgcm_pagelist *dst_pg_lst;
+ struct page *page;
+ bool is_vmalloc;
+ u32 i, page_count;
+
+ dst_parm->type = type;
+
+ if (len == 0) {
+ dst_parm->u.pointer.size = 0;
+ dst_parm->u.pointer.u.linear_addr = 0;
+ return;
+ }
+
+ dst_pg_lst = (void *)call + *off_extra;
+ page_count = hgcm_call_buf_size_in_pages(buf, len);
+ is_vmalloc = is_vmalloc_addr(buf);
+
+ dst_parm->type = VMMDEV_HGCM_PARM_TYPE_PAGELIST;
+ dst_parm->u.page_list.size = len;
+ dst_parm->u.page_list.offset = *off_extra;
+ dst_pg_lst->flags = hgcm_call_linear_addr_type_to_pagelist_flags(type);
+ dst_pg_lst->offset_first_page = (unsigned long)buf & ~PAGE_MASK;
+ dst_pg_lst->page_count = page_count;
+
+ for (i = 0; i < page_count; i++) {
+ if (is_vmalloc)
+ page = vmalloc_to_page(buf);
+ else
+ page = virt_to_page(buf);
+
+ dst_pg_lst->pages[i] = page_to_phys(page);
+ buf += PAGE_SIZE;
+ }
+
+ *off_extra += offsetof(struct vmmdev_hgcm_pagelist, pages[page_count]);
+}
+
+/**
+ * Initializes the call request that we're sending to the host.
+ * @call: The call to initialize.
+ * @client_id: The client ID of the caller.
+ * @function: The function number of the function to call.
+ * @src_parm: Pointer to source function call parameters.
+ * @parm_count: Number of function call parameters.
+ * @bounce_bufs: The bouncebuffer array.
+ */
+static void hgcm_call_init_call(
+ struct vmmdev_hgcm_call *call, u32 client_id, u32 function,
+ const struct vmmdev_hgcm_function_parameter *src_parm,
+ u32 parm_count, void **bounce_bufs)
+{
+ struct vmmdev_hgcm_function_parameter *dst_parm =
+ VMMDEV_HGCM_CALL_PARMS(call);
+ u32 i, off_extra = (uintptr_t)(dst_parm + parm_count) - (uintptr_t)call;
+ void *buf;
+
+ call->header.flags = 0;
+ call->header.result = VINF_SUCCESS;
+ call->client_id = client_id;
+ call->function = function;
+ call->parm_count = parm_count;
+
+ for (i = 0; i < parm_count; i++, src_parm++, dst_parm++) {
+ switch (src_parm->type) {
+ case VMMDEV_HGCM_PARM_TYPE_32BIT:
+ case VMMDEV_HGCM_PARM_TYPE_64BIT:
+ *dst_parm = *src_parm;
+ break;
+
+ case VMMDEV_HGCM_PARM_TYPE_LINADDR:
+ case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN:
+ case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT:
+ hgcm_call_init_linaddr(call, dst_parm, bounce_bufs[i],
+ src_parm->u.pointer.size,
+ src_parm->type, &off_extra);
+ break;
+
+ case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL:
+ case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN:
+ case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_OUT:
+ buf = (void *)src_parm->u.pointer.u.linear_addr;
+ hgcm_call_init_linaddr(call, dst_parm, buf,
+ src_parm->u.pointer.size,
+ src_parm->type, &off_extra);
+ break;
+
+ default:
+ WARN_ON(1);
+ dst_parm->type = VMMDEV_HGCM_PARM_TYPE_INVALID;
+ }
+ }
+}
+
+/**
+ * Tries to cancel a pending HGCM call.
+ *
+ * Return: VBox status code
+ */
+static int hgcm_cancel_call(struct vbg_dev *gdev, struct vmmdev_hgcm_call *call)
+{
+ int rc;
+
+ /*
+ * We use a pre-allocated request for cancellations, which is
+ * protected by cancel_req_mutex. This means that all cancellations
+ * get serialized, this should be fine since they should be rare.
+ */
+ mutex_lock(&gdev->cancel_req_mutex);
+ gdev->cancel_req->phys_req_to_cancel = virt_to_phys(call);
+ rc = vbg_req_perform(gdev, gdev->cancel_req);
+ mutex_unlock(&gdev->cancel_req_mutex);
+
+ if (rc == VERR_NOT_IMPLEMENTED) {
+ call->header.flags |= VMMDEV_HGCM_REQ_CANCELLED;
+ call->header.header.request_type = VMMDEVREQ_HGCM_CANCEL;
+
+ rc = vbg_req_perform(gdev, call);
+ if (rc == VERR_INVALID_PARAMETER)
+ rc = VERR_NOT_FOUND;
+ }
+
+ if (rc >= 0)
+ call->header.flags |= VMMDEV_HGCM_REQ_CANCELLED;
+
+ return rc;
+}
+
+/**
+ * Performs the call and completion wait.
+ * Return: 0 or negative errno value.
+ * @gdev: The VBoxGuest device extension.
+ * @call: The call to execute.
+ * @timeout_ms: Timeout in ms.
+ * @leak_it:	Where to return the leak-it indicator: set when the call
+ *		could neither complete nor be cancelled, so the request
+ *		must be leaked instead of freed.
+ */
+static int vbg_hgcm_do_call(struct vbg_dev *gdev, struct vmmdev_hgcm_call *call,
+ u32 timeout_ms, bool *leak_it)
+{
+ int rc, cancel_rc, ret;
+ long timeout;
+
+ *leak_it = false;
+
+ rc = vbg_req_perform(gdev, call);
+
+ /*
+ * If the call failed, then pretend success. Upper layers will
+ * interpret the result code in the packet.
+ */
+ if (rc < 0) {
+ call->header.result = rc;
+ return 0;
+ }
+
+ if (rc != VINF_HGCM_ASYNC_EXECUTE)
+ return 0;
+
+ /* Host decided to process the request asynchronously, wait for it */
+ if (timeout_ms == U32_MAX)
+ timeout = MAX_SCHEDULE_TIMEOUT;
+ else
+ timeout = msecs_to_jiffies(timeout_ms);
+
+ timeout = wait_event_interruptible_timeout(
+ gdev->hgcm_wq,
+ hgcm_req_done(gdev, &call->header),
+ timeout);
+
+ /* timeout > 0 means hgcm_req_done has returned true, so success */
+ if (timeout > 0)
+ return 0;
+
+ if (timeout == 0)
+ ret = -ETIMEDOUT;
+ else
+ ret = -EINTR;
+
+ /* Cancel the request */
+ cancel_rc = hgcm_cancel_call(gdev, call);
+ if (cancel_rc >= 0)
+ return ret;
+
+ /*
+ * Failed to cancel, this should mean that the cancel has lost the
+ * race with normal completion, wait while the host completes it.
+ */
+ if (cancel_rc == VERR_NOT_FOUND || cancel_rc == VERR_SEM_DESTROYED)
+ timeout = msecs_to_jiffies(500);
+ else
+ timeout = msecs_to_jiffies(2000);
+
+ timeout = wait_event_timeout(gdev->hgcm_wq,
+ hgcm_req_done(gdev, &call->header),
+ timeout);
+
+ if (WARN_ON(timeout == 0)) {
+ /* We really should never get here */
+ vbg_err("%s: Call timedout and cancellation failed, leaking the request\n",
+ __func__);
+ *leak_it = true;
+ return ret;
+ }
+
+ /* The call has completed normally after all */
+ return 0;
+}
+
+/**
+ * Copies the result of the call back to the caller info structure and user
+ * buffers.
+ * Return: 0 or negative errno value.
+ * @call: HGCM call request.
+ * @dst_parm: Pointer to function call parameters destination.
+ * @parm_count: Number of function call parameters.
+ * @bounce_bufs: The bouncebuffer array.
+ */
+static int hgcm_call_copy_back_result(
+ const struct vmmdev_hgcm_call *call,
+ struct vmmdev_hgcm_function_parameter *dst_parm,
+ u32 parm_count, void **bounce_bufs)
+{
+ const struct vmmdev_hgcm_function_parameter *src_parm =
+ VMMDEV_HGCM_CALL_PARMS(call);
+ void __user *p;
+ int ret;
+ u32 i;
+
+ /* Copy back parameters. */
+ for (i = 0; i < parm_count; i++, src_parm++, dst_parm++) {
+ switch (dst_parm->type) {
+ case VMMDEV_HGCM_PARM_TYPE_32BIT:
+ case VMMDEV_HGCM_PARM_TYPE_64BIT:
+ *dst_parm = *src_parm;
+ break;
+
+ case VMMDEV_HGCM_PARM_TYPE_PAGELIST:
+ dst_parm->u.page_list.size = src_parm->u.page_list.size;
+ break;
+
+ case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN:
+ case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL:
+ case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN:
+ case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_OUT:
+ dst_parm->u.pointer.size = src_parm->u.pointer.size;
+ break;
+
+ case VMMDEV_HGCM_PARM_TYPE_LINADDR:
+ case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT:
+ dst_parm->u.pointer.size = src_parm->u.pointer.size;
+
+ p = (void __user *)dst_parm->u.pointer.u.linear_addr;
+ ret = copy_to_user(p, bounce_bufs[i],
+ min(src_parm->u.pointer.size,
+ dst_parm->u.pointer.size));
+ if (ret)
+ return -EFAULT;
+ break;
+
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+int vbg_hgcm_call(struct vbg_dev *gdev, u32 client_id, u32 function,
+ u32 timeout_ms, struct vmmdev_hgcm_function_parameter *parms,
+ u32 parm_count, int *vbox_status)
+{
+ struct vmmdev_hgcm_call *call;
+ void **bounce_bufs = NULL;
+ bool leak_it;
+ size_t size;
+ int i, ret;
+
+ size = sizeof(struct vmmdev_hgcm_call) +
+ parm_count * sizeof(struct vmmdev_hgcm_function_parameter);
+	/*
+	 * Validate and buffer the parameters for the call. This also
+	 * increases size with the amount of extra space needed for
+	 * page lists.
+	 */
+ ret = hgcm_call_preprocess(parms, parm_count, &bounce_bufs, &size);
+ if (ret) {
+		/* Even on error, bounce bufs may already have been allocated */
+ goto free_bounce_bufs;
+ }
+
+ call = vbg_req_alloc(size, VMMDEVREQ_HGCM_CALL);
+ if (!call) {
+ ret = -ENOMEM;
+ goto free_bounce_bufs;
+ }
+
+ hgcm_call_init_call(call, client_id, function, parms, parm_count,
+ bounce_bufs);
+
+ ret = vbg_hgcm_do_call(gdev, call, timeout_ms, &leak_it);
+ if (ret == 0) {
+ *vbox_status = call->header.result;
+ ret = hgcm_call_copy_back_result(call, parms, parm_count,
+ bounce_bufs);
+ }
+
+ if (!leak_it)
+ kfree(call);
+
+free_bounce_bufs:
+ if (bounce_bufs) {
+ for (i = 0; i < parm_count; i++)
+ kvfree(bounce_bufs[i]);
+ kfree(bounce_bufs);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(vbg_hgcm_call);
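A hedged usage sketch of the exported helper (the client id, function
number and parameter value here are invented; the real callers are the
vboxguest ioctl path and client drivers). Note the two error levels: ret
covers transport problems, while vbox_status carries the in-band HGCM
result:

	struct vmmdev_hgcm_function_parameter parm = {
		.type = VMMDEV_HGCM_PARM_TYPE_32BIT,
		.u.value32 = 42,		/* arbitrary example value */
	};
	int vbox_status, ret;

	ret = vbg_hgcm_call(gdev, client_id, 1 /* hypothetical function */,
			    U32_MAX /* wait forever */, &parm, 1, &vbox_status);
	if (ret < 0)
		return ret;			/* Linux-level error */
	if (vbox_status < 0)
		return vbg_status_code_to_errno(vbox_status);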
+
+#ifdef CONFIG_COMPAT
+int vbg_hgcm_call32(
+ struct vbg_dev *gdev, u32 client_id, u32 function, u32 timeout_ms,
+ struct vmmdev_hgcm_function_parameter32 *parm32, u32 parm_count,
+ int *vbox_status)
+{
+ struct vmmdev_hgcm_function_parameter *parm64 = NULL;
+ u32 i, size;
+ int ret = 0;
+
+	/* KISS: allocate a temporary 64-bit parameter array and convert. */
+ size = parm_count * sizeof(struct vmmdev_hgcm_function_parameter);
+ parm64 = kzalloc(size, GFP_KERNEL);
+ if (!parm64)
+ return -ENOMEM;
+
+ for (i = 0; i < parm_count; i++) {
+ switch (parm32[i].type) {
+ case VMMDEV_HGCM_PARM_TYPE_32BIT:
+ parm64[i].type = VMMDEV_HGCM_PARM_TYPE_32BIT;
+ parm64[i].u.value32 = parm32[i].u.value32;
+ break;
+
+ case VMMDEV_HGCM_PARM_TYPE_64BIT:
+ parm64[i].type = VMMDEV_HGCM_PARM_TYPE_64BIT;
+ parm64[i].u.value64 = parm32[i].u.value64;
+ break;
+
+ case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT:
+ case VMMDEV_HGCM_PARM_TYPE_LINADDR:
+ case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN:
+ parm64[i].type = parm32[i].type;
+ parm64[i].u.pointer.size = parm32[i].u.pointer.size;
+ parm64[i].u.pointer.u.linear_addr =
+ parm32[i].u.pointer.u.linear_addr;
+ break;
+
+ default:
+ ret = -EINVAL;
+ }
+ if (ret < 0)
+ goto out_free;
+ }
+
+ ret = vbg_hgcm_call(gdev, client_id, function, timeout_ms,
+ parm64, parm_count, vbox_status);
+ if (ret < 0)
+ goto out_free;
+
+ /* Copy back. */
+	for (i = 0; i < parm_count; i++) {
+ switch (parm64[i].type) {
+ case VMMDEV_HGCM_PARM_TYPE_32BIT:
+ parm32[i].u.value32 = parm64[i].u.value32;
+ break;
+
+ case VMMDEV_HGCM_PARM_TYPE_64BIT:
+ parm32[i].u.value64 = parm64[i].u.value64;
+ break;
+
+ case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT:
+ case VMMDEV_HGCM_PARM_TYPE_LINADDR:
+ case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN:
+ parm32[i].u.pointer.size = parm64[i].u.pointer.size;
+ break;
+
+ default:
+ WARN_ON(1);
+ ret = -EINVAL;
+ }
+ }
+
+out_free:
+ kfree(parm64);
+ return ret;
+}
+#endif
+
+static const int vbg_status_code_to_errno_table[] = {
+ [-VERR_ACCESS_DENIED] = -EPERM,
+ [-VERR_FILE_NOT_FOUND] = -ENOENT,
+ [-VERR_PROCESS_NOT_FOUND] = -ESRCH,
+ [-VERR_INTERRUPTED] = -EINTR,
+ [-VERR_DEV_IO_ERROR] = -EIO,
+ [-VERR_TOO_MUCH_DATA] = -E2BIG,
+ [-VERR_BAD_EXE_FORMAT] = -ENOEXEC,
+ [-VERR_INVALID_HANDLE] = -EBADF,
+ [-VERR_TRY_AGAIN] = -EAGAIN,
+ [-VERR_NO_MEMORY] = -ENOMEM,
+ [-VERR_INVALID_POINTER] = -EFAULT,
+ [-VERR_RESOURCE_BUSY] = -EBUSY,
+ [-VERR_ALREADY_EXISTS] = -EEXIST,
+ [-VERR_NOT_SAME_DEVICE] = -EXDEV,
+ [-VERR_NOT_A_DIRECTORY] = -ENOTDIR,
+ [-VERR_PATH_NOT_FOUND] = -ENOTDIR,
+ [-VERR_INVALID_NAME] = -ENOENT,
+ [-VERR_IS_A_DIRECTORY] = -EISDIR,
+ [-VERR_INVALID_PARAMETER] = -EINVAL,
+ [-VERR_TOO_MANY_OPEN_FILES] = -ENFILE,
+ [-VERR_INVALID_FUNCTION] = -ENOTTY,
+ [-VERR_SHARING_VIOLATION] = -ETXTBSY,
+ [-VERR_FILE_TOO_BIG] = -EFBIG,
+ [-VERR_DISK_FULL] = -ENOSPC,
+ [-VERR_SEEK_ON_DEVICE] = -ESPIPE,
+ [-VERR_WRITE_PROTECT] = -EROFS,
+ [-VERR_BROKEN_PIPE] = -EPIPE,
+ [-VERR_DEADLOCK] = -EDEADLK,
+ [-VERR_FILENAME_TOO_LONG] = -ENAMETOOLONG,
+ [-VERR_FILE_LOCK_FAILED] = -ENOLCK,
+ [-VERR_NOT_IMPLEMENTED] = -ENOSYS,
+ [-VERR_NOT_SUPPORTED] = -ENOSYS,
+ [-VERR_DIR_NOT_EMPTY] = -ENOTEMPTY,
+ [-VERR_TOO_MANY_SYMLINKS] = -ELOOP,
+ [-VERR_NO_MORE_FILES] = -ENODATA,
+ [-VERR_NO_DATA] = -ENODATA,
+ [-VERR_NET_NO_NETWORK] = -ENONET,
+ [-VERR_NET_NOT_UNIQUE_NAME] = -ENOTUNIQ,
+ [-VERR_NO_TRANSLATION] = -EILSEQ,
+ [-VERR_NET_NOT_SOCKET] = -ENOTSOCK,
+ [-VERR_NET_DEST_ADDRESS_REQUIRED] = -EDESTADDRREQ,
+ [-VERR_NET_MSG_SIZE] = -EMSGSIZE,
+ [-VERR_NET_PROTOCOL_TYPE] = -EPROTOTYPE,
+ [-VERR_NET_PROTOCOL_NOT_AVAILABLE] = -ENOPROTOOPT,
+ [-VERR_NET_PROTOCOL_NOT_SUPPORTED] = -EPROTONOSUPPORT,
+ [-VERR_NET_SOCKET_TYPE_NOT_SUPPORTED] = -ESOCKTNOSUPPORT,
+ [-VERR_NET_OPERATION_NOT_SUPPORTED] = -EOPNOTSUPP,
+ [-VERR_NET_PROTOCOL_FAMILY_NOT_SUPPORTED] = -EPFNOSUPPORT,
+ [-VERR_NET_ADDRESS_FAMILY_NOT_SUPPORTED] = -EAFNOSUPPORT,
+ [-VERR_NET_ADDRESS_IN_USE] = -EADDRINUSE,
+ [-VERR_NET_ADDRESS_NOT_AVAILABLE] = -EADDRNOTAVAIL,
+ [-VERR_NET_DOWN] = -ENETDOWN,
+ [-VERR_NET_UNREACHABLE] = -ENETUNREACH,
+ [-VERR_NET_CONNECTION_RESET] = -ENETRESET,
+ [-VERR_NET_CONNECTION_ABORTED] = -ECONNABORTED,
+ [-VERR_NET_CONNECTION_RESET_BY_PEER] = -ECONNRESET,
+ [-VERR_NET_NO_BUFFER_SPACE] = -ENOBUFS,
+ [-VERR_NET_ALREADY_CONNECTED] = -EISCONN,
+ [-VERR_NET_NOT_CONNECTED] = -ENOTCONN,
+ [-VERR_NET_SHUTDOWN] = -ESHUTDOWN,
+ [-VERR_NET_TOO_MANY_REFERENCES] = -ETOOMANYREFS,
+ [-VERR_TIMEOUT] = -ETIMEDOUT,
+ [-VERR_NET_CONNECTION_REFUSED] = -ECONNREFUSED,
+ [-VERR_NET_HOST_DOWN] = -EHOSTDOWN,
+ [-VERR_NET_HOST_UNREACHABLE] = -EHOSTUNREACH,
+ [-VERR_NET_ALREADY_IN_PROGRESS] = -EALREADY,
+ [-VERR_NET_IN_PROGRESS] = -EINPROGRESS,
+ [-VERR_MEDIA_NOT_PRESENT] = -ENOMEDIUM,
+ [-VERR_MEDIA_NOT_RECOGNIZED] = -EMEDIUMTYPE,
+};
+
+int vbg_status_code_to_errno(int rc)
+{
+ if (rc >= 0)
+ return 0;
+
+ rc = -rc;
+ if (rc >= ARRAY_SIZE(vbg_status_code_to_errno_table) ||
+ vbg_status_code_to_errno_table[rc] == 0) {
+ vbg_warn("%s: Unhandled err %d\n", __func__, -rc);
+ return -EPROTO;
+ }
+
+ return vbg_status_code_to_errno_table[rc];
+}
+EXPORT_SYMBOL(vbg_status_code_to_errno);
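The table above indexes with negated status codes so that sparse
designated initializers can map the negative VERR_* constants directly.
A standalone illustration of the pattern, using invented status codes:

	#define ERR_NOPE  (-1)		/* hypothetical status codes */
	#define ERR_LATER (-5)

	static const int to_errno[] = {
		[-ERR_NOPE]  = -EINVAL,
		[-ERR_LATER] = -EAGAIN,
	};

	static int convert(int rc)
	{
		if (rc >= 0)
			return 0;
		if (-rc >= ARRAY_SIZE(to_errno) || to_errno[-rc] == 0)
			return -EPROTO;	/* unknown code, catch-all */
		return to_errno[-rc];
	}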
diff --git a/drivers/virt/vboxguest/vboxguest_version.h b/drivers/virt/vboxguest/vboxguest_version.h
new file mode 100644
index 000000000000..77f0c8f8a231
--- /dev/null
+++ b/drivers/virt/vboxguest/vboxguest_version.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR CDDL-1.0) */
+/*
+ * VBox Guest Additions version info, used by the host to determine which
+ * guest-addition features are supported in some cases. This needs to be
+ * kept in sync with the upstream VirtualBox versioning scheme whenever we
+ * implement / port new features from the upstream out-of-tree vboxguest
+ * driver.
+ */
+
+#ifndef __VBOX_VERSION_H__
+#define __VBOX_VERSION_H__
+
+/* Last synced October 4th 2017 */
+#define VBG_VERSION_MAJOR 5
+#define VBG_VERSION_MINOR 2
+#define VBG_VERSION_BUILD 0
+#define VBG_SVN_REV 68940
+#define VBG_VERSION_STRING "5.2.0"
+
+#endif
diff --git a/drivers/virt/vboxguest/vmmdev.h b/drivers/virt/vboxguest/vmmdev.h
new file mode 100644
index 000000000000..5e2ae978935d
--- /dev/null
+++ b/drivers/virt/vboxguest/vmmdev.h
@@ -0,0 +1,449 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR CDDL-1.0) */
+/*
+ * Virtual Device for Guest <-> VMM/Host communication interface
+ *
+ * Copyright (C) 2006-2016 Oracle Corporation
+ */
+
+#ifndef __VBOX_VMMDEV_H__
+#define __VBOX_VMMDEV_H__
+
+#include <asm/bitsperlong.h>
+#include <linux/sizes.h>
+#include <linux/types.h>
+#include <linux/vbox_vmmdev_types.h>
+
+/* Port for generic request interface (relative offset). */
+#define VMMDEV_PORT_OFF_REQUEST 0
+
+/** Layout of VMMDEV RAM region that contains information for guest. */
+struct vmmdev_memory {
+ /** The size of this structure. */
+ u32 size;
+ /** The structure version. (VMMDEV_MEMORY_VERSION) */
+ u32 version;
+
+ union {
+ struct {
+ /** Flag telling that VMMDev has events pending. */
+ u8 have_events;
+ /** Explicit padding, MBZ. */
+ u8 padding[3];
+ } V1_04;
+
+ struct {
+ /** Pending events flags, set by host. */
+ u32 host_events;
+ /** Mask of events the guest wants, set by guest. */
+ u32 guest_event_mask;
+ } V1_03;
+ } V;
+
+ /* struct vbva_memory, not used */
+};
+VMMDEV_ASSERT_SIZE(vmmdev_memory, 8 + 8);
+
+/** Version of vmmdev_memory structure (vmmdev_memory::version). */
+#define VMMDEV_MEMORY_VERSION (1)
+
+/* Host mouse capabilities have changed. */
+#define VMMDEV_EVENT_MOUSE_CAPABILITIES_CHANGED BIT(0)
+/* HGCM event. */
+#define VMMDEV_EVENT_HGCM BIT(1)
+/* A display change request has been issued. */
+#define VMMDEV_EVENT_DISPLAY_CHANGE_REQUEST BIT(2)
+/* Credentials are available for judgement. */
+#define VMMDEV_EVENT_JUDGE_CREDENTIALS BIT(3)
+/* The guest has been restored. */
+#define VMMDEV_EVENT_RESTORED BIT(4)
+/* Seamless mode state changed. */
+#define VMMDEV_EVENT_SEAMLESS_MODE_CHANGE_REQUEST BIT(5)
+/* Memory balloon size changed. */
+#define VMMDEV_EVENT_BALLOON_CHANGE_REQUEST BIT(6)
+/* Statistics interval changed. */
+#define VMMDEV_EVENT_STATISTICS_INTERVAL_CHANGE_REQUEST BIT(7)
+/* VRDP status changed. */
+#define VMMDEV_EVENT_VRDP BIT(8)
+/* New mouse position data available. */
+#define VMMDEV_EVENT_MOUSE_POSITION_CHANGED BIT(9)
+/* CPU hotplug event occurred. */
+#define VMMDEV_EVENT_CPU_HOTPLUG BIT(10)
+/* The mask of valid events, for sanity checking. */
+#define VMMDEV_EVENT_VALID_EVENT_MASK 0x000007ffU
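These bits arrive OR-ed together in a single u32, so an interrupt handler
typically tests them individually. An illustrative dispatch sketch (the
balloon work-item field name is an assumption; the HGCM wakeup matches the
wait in vbg_hgcm_do_call() earlier in this patch):

	if (WARN_ON(events & ~VMMDEV_EVENT_VALID_EVENT_MASK))
		events &= VMMDEV_EVENT_VALID_EVENT_MASK;

	if (events & VMMDEV_EVENT_HGCM)
		wake_up(&gdev->hgcm_wq);
	if (events & VMMDEV_EVENT_BALLOON_CHANGE_REQUEST)
		schedule_work(&gdev->mem_balloon.work);	/* assumed field */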
+
+/*
+ * Additions are allowed to work only if additions_major equals
+ * VMMDEV_VERSION_MAJOR and additions_minor is <= VMMDEV_VERSION_MINOR.
+ * The additions version is reported to the host (VMMDev) via
+ * VMMDEVREQ_REPORT_GUEST_INFO.
+ */
+#define VMMDEV_VERSION 0x00010004
+#define VMMDEV_VERSION_MAJOR (VMMDEV_VERSION >> 16)
+#define VMMDEV_VERSION_MINOR (VMMDEV_VERSION & 0xffff)
+
+/* Maximum request packet size. */
+#define VMMDEV_MAX_VMMDEVREQ_SIZE 1048576
+
+/* Version of vmmdev_request_header structure. */
+#define VMMDEV_REQUEST_HEADER_VERSION 0x10001
+
+/** struct vmmdev_request_header - Generic VMMDev request header. */
+struct vmmdev_request_header {
+ /** IN: Size of the structure in bytes (including body). */
+ u32 size;
+ /** IN: Version of the structure. */
+ u32 version;
+ /** IN: Type of the request. */
+ enum vmmdev_request_type request_type;
+ /** OUT: Return code. */
+ s32 rc;
+ /** Reserved field no.1. MBZ. */
+ u32 reserved1;
+ /** Reserved field no.2. MBZ. */
+ u32 reserved2;
+};
+VMMDEV_ASSERT_SIZE(vmmdev_request_header, 24);
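Every concrete request below embeds this header as its first member. A
sketch of how an allocator in the style of vbg_req_alloc() (used by
vbg_hgcm_call() earlier in this patch) would plausibly fill it in; the
actual implementation lives elsewhere in the patch and may differ, e.g.
in its allocation flags:

	struct vmmdev_request_header *hdr;

	hdr = kmalloc(size, GFP_KERNEL);  /* real code needs host-reachable memory */
	if (!hdr)
		return NULL;

	hdr->size         = size;
	hdr->version      = VMMDEV_REQUEST_HEADER_VERSION;
	hdr->request_type = req_type;
	hdr->rc           = VERR_GENERAL_FAILURE;  /* host overwrites this */
	hdr->reserved1    = 0;
	hdr->reserved2    = 0;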
+
+/**
+ * struct vmmdev_mouse_status - Mouse status request structure.
+ *
+ * Used by VMMDEVREQ_GET_MOUSE_STATUS and VMMDEVREQ_SET_MOUSE_STATUS.
+ */
+struct vmmdev_mouse_status {
+ /** header */
+ struct vmmdev_request_header header;
+ /** Mouse feature mask. See VMMDEV_MOUSE_*. */
+ u32 mouse_features;
+ /** Mouse x position. */
+ s32 pointer_pos_x;
+ /** Mouse y position. */
+ s32 pointer_pos_y;
+};
+VMMDEV_ASSERT_SIZE(vmmdev_mouse_status, 24 + 12);
+
+/* The guest can (== wants to) handle absolute coordinates. */
+#define VMMDEV_MOUSE_GUEST_CAN_ABSOLUTE BIT(0)
+/*
+ * The host can (== wants to) send absolute coordinates.
+ * (Input not captured.)
+ */
+#define VMMDEV_MOUSE_HOST_WANTS_ABSOLUTE BIT(1)
+/*
+ * The guest can *NOT* switch to software cursor and therefore depends on the
+ * host cursor.
+ *
+ * When guest additions are installed and the host has promised to display the
+ * cursor itself, the guest installs a hardware mouse driver. Don't ask the
+ * guest to switch to a software cursor then.
+ */
+#define VMMDEV_MOUSE_GUEST_NEEDS_HOST_CURSOR BIT(2)
+/* The host does NOT provide support for drawing the cursor itself. */
+#define VMMDEV_MOUSE_HOST_CANNOT_HWPOINTER BIT(3)
+/* The guest can read VMMDev events to find out about pointer movement */
+#define VMMDEV_MOUSE_NEW_PROTOCOL BIT(4)
+/*
+ * If the guest changes the status of the VMMDEV_MOUSE_GUEST_NEEDS_HOST_CURSOR
+ * bit, the host will honour this.
+ */
+#define VMMDEV_MOUSE_HOST_RECHECKS_NEEDS_HOST_CURSOR BIT(5)
+/*
+ * The host supplies an absolute pointing device. The Guest Additions may
+ * wish to use this to decide whether to install their own driver.
+ */
+#define VMMDEV_MOUSE_HOST_HAS_ABS_DEV BIT(6)
+
+/* The minimum value our pointing device can return. */
+#define VMMDEV_MOUSE_RANGE_MIN 0
+/* The maximum value our pointing device can return. */
+#define VMMDEV_MOUSE_RANGE_MAX 0xFFFF
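Absolute positions are always reported in this fixed range, independent of
the guest resolution, so an input driver has to rescale them. An
illustrative conversion for a screen `width` pixels wide (no driver
context assumed):

	u32 x_pix = (u32)x_abs * (width - 1) /
		    (VMMDEV_MOUSE_RANGE_MAX - VMMDEV_MOUSE_RANGE_MIN);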
+
+/**
+ * struct vmmdev_host_version - VirtualBox host version request structure.
+ *
+ * VBG uses this to detect the presence of new features in the interface.
+ */
+struct vmmdev_host_version {
+ /** Header. */
+ struct vmmdev_request_header header;
+ /** Major version. */
+ u16 major;
+ /** Minor version. */
+ u16 minor;
+ /** Build number. */
+ u32 build;
+ /** SVN revision. */
+ u32 revision;
+ /** Feature mask. */
+ u32 features;
+};
+VMMDEV_ASSERT_SIZE(vmmdev_host_version, 24 + 16);
+
+/* Physical page lists are supported by HGCM. */
+#define VMMDEV_HVF_HGCM_PHYS_PAGE_LIST BIT(0)
+
+/**
+ * struct vmmdev_mask - Structure to set / clear bits in a mask used for
+ * VMMDEVREQ_SET_GUEST_CAPABILITIES and VMMDEVREQ_CTL_GUEST_FILTER_MASK.
+ */
+struct vmmdev_mask {
+ /** Header. */
+ struct vmmdev_request_header header;
+ /** Mask of bits to be set. */
+ u32 or_mask;
+ /** Mask of bits to be cleared. */
+ u32 not_mask;
+};
+VMMDEV_ASSERT_SIZE(vmmdev_mask, 24 + 8);
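The or/not pair lets the guest flip individual bits without a
read-modify-write round trip; the host effectively applies
mask = (mask | or_mask) & ~not_mask. A sketch that would advertise
seamless support while withdrawing graphics support (request allocation
and submission omitted; the capability bits are defined just below):

	req->or_mask  = VMMDEV_GUEST_SUPPORTS_SEAMLESS;
	req->not_mask = VMMDEV_GUEST_SUPPORTS_GRAPHICS;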
+
+/* The guest supports seamless display rendering. */
+#define VMMDEV_GUEST_SUPPORTS_SEAMLESS BIT(0)
+/* The guest supports mapping guest to host windows. */
+#define VMMDEV_GUEST_SUPPORTS_GUEST_HOST_WINDOW_MAPPING BIT(1)
+/*
+ * The guest graphical additions are active.
+ * Used for fast activation and deactivation of certain graphical operations
+ * (e.g. resizing & seamless). The legacy VMMDEVREQ_REPORT_GUEST_CAPABILITIES
+ * request sets this automatically, but VMMDEVREQ_SET_GUEST_CAPABILITIES does
+ * not.
+ */
+#define VMMDEV_GUEST_SUPPORTS_GRAPHICS BIT(2)
+
+/** struct vmmdev_hypervisorinfo - Hypervisor info structure. */
+struct vmmdev_hypervisorinfo {
+ /** Header. */
+ struct vmmdev_request_header header;
+ /**
+ * Guest virtual address of proposed hypervisor start.
+ * Not used by VMMDEVREQ_GET_HYPERVISOR_INFO.
+ */
+ u32 hypervisor_start;
+ /** Hypervisor size in bytes. */
+ u32 hypervisor_size;
+};
+VMMDEV_ASSERT_SIZE(vmmdev_hypervisorinfo, 24 + 8);
+
+/** struct vmmdev_events - Pending events structure. */
+struct vmmdev_events {
+ /** Header. */
+ struct vmmdev_request_header header;
+ /** OUT: Pending event mask. */
+ u32 events;
+};
+VMMDEV_ASSERT_SIZE(vmmdev_events, 24 + 4);
+
+#define VMMDEV_OSTYPE_LINUX26 0x53000
+#define VMMDEV_OSTYPE_X64 BIT(8)
+
+/** struct vmmdev_guest_info - Guest information report. */
+struct vmmdev_guest_info {
+ /** Header. */
+ struct vmmdev_request_header header;
+ /**
+ * The VMMDev interface version expected by additions.
+ * *Deprecated*, do not use anymore! Will be removed.
+ */
+ u32 interface_version;
+ /** Guest OS type. */
+ u32 os_type;
+};
+VMMDEV_ASSERT_SIZE(vmmdev_guest_info, 24 + 8);
+
+/** struct vmmdev_guest_info2 - Guest information report, version 2. */
+struct vmmdev_guest_info2 {
+ /** Header. */
+ struct vmmdev_request_header header;
+ /** Major version. */
+ u16 additions_major;
+ /** Minor version. */
+ u16 additions_minor;
+ /** Build number. */
+ u32 additions_build;
+ /** SVN revision. */
+ u32 additions_revision;
+ /** Feature mask, currently unused. */
+ u32 additions_features;
+	/**
+	 * The intended meaning of this field was some additional
+	 * information, for example 'Beta 1' or something like that.
+	 *
+	 * As implemented it simply carries VBG_VERSION_STRING.
+	 *
+	 * This means the first three members are duplicated in this field
+	 * (if the guest build config is sane), so the user must check for
+	 * and chop off the duplication before use. Because the Main code
+	 * blindly trusts the field's content, there is no way back.
+	 */
+ char name[128];
+};
+VMMDEV_ASSERT_SIZE(vmmdev_guest_info2, 24 + 144);
+
+enum vmmdev_guest_facility_type {
+ VBOXGUEST_FACILITY_TYPE_UNKNOWN = 0,
+ VBOXGUEST_FACILITY_TYPE_VBOXGUEST_DRIVER = 20,
+ /* VBoxGINA / VBoxCredProv / pam_vbox. */
+ VBOXGUEST_FACILITY_TYPE_AUTO_LOGON = 90,
+ VBOXGUEST_FACILITY_TYPE_VBOX_SERVICE = 100,
+ /* VBoxTray (Windows), VBoxClient (Linux, Unix). */
+ VBOXGUEST_FACILITY_TYPE_VBOX_TRAY_CLIENT = 101,
+ VBOXGUEST_FACILITY_TYPE_SEAMLESS = 1000,
+ VBOXGUEST_FACILITY_TYPE_GRAPHICS = 1100,
+ VBOXGUEST_FACILITY_TYPE_ALL = 0x7ffffffe,
+	/* Ensure the enum is a 32-bit data type */
+ VBOXGUEST_FACILITY_TYPE_SIZEHACK = 0x7fffffff
+};
+
+enum vmmdev_guest_facility_status {
+ VBOXGUEST_FACILITY_STATUS_INACTIVE = 0,
+ VBOXGUEST_FACILITY_STATUS_PAUSED = 1,
+ VBOXGUEST_FACILITY_STATUS_PRE_INIT = 20,
+ VBOXGUEST_FACILITY_STATUS_INIT = 30,
+ VBOXGUEST_FACILITY_STATUS_ACTIVE = 50,
+ VBOXGUEST_FACILITY_STATUS_TERMINATING = 100,
+ VBOXGUEST_FACILITY_STATUS_TERMINATED = 101,
+ VBOXGUEST_FACILITY_STATUS_FAILED = 800,
+ VBOXGUEST_FACILITY_STATUS_UNKNOWN = 999,
+	/* Ensure the enum is a 32-bit data type */
+ VBOXGUEST_FACILITY_STATUS_SIZEHACK = 0x7fffffff
+};
+
+/** struct vmmdev_guest_status - Guest Additions status structure. */
+struct vmmdev_guest_status {
+ /** Header. */
+ struct vmmdev_request_header header;
+ /** Facility the status is indicated for. */
+ enum vmmdev_guest_facility_type facility;
+ /** Current guest status. */
+ enum vmmdev_guest_facility_status status;
+ /** Flags, not used at the moment. */
+ u32 flags;
+};
+VMMDEV_ASSERT_SIZE(vmmdev_guest_status, 24 + 12);
+
+#define VMMDEV_MEMORY_BALLOON_CHUNK_SIZE (1048576)
+#define VMMDEV_MEMORY_BALLOON_CHUNK_PAGES (1048576 / 4096)
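The two constants encode "one chunk is 1 MiB, tracked as 4 KiB pages",
i.e. 256 pages per chunk. The invariant can be checked at compile time in
the usual kernel style:

	BUILD_BUG_ON(VMMDEV_MEMORY_BALLOON_CHUNK_PAGES * 4096 !=
		     VMMDEV_MEMORY_BALLOON_CHUNK_SIZE);  /* 256 * 4096 == 1048576 */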
+
+/** struct vmmdev_memballoon_info - Memory-balloon info structure. */
+struct vmmdev_memballoon_info {
+ /** Header. */
+ struct vmmdev_request_header header;
+	/** Balloon size in chunks (one chunk = 1 megabyte). */
+	u32 balloon_chunks;
+	/** Guest RAM size in chunks (one chunk = 1 megabyte). */
+	u32 phys_mem_chunks;
+ /**
+ * Setting this to VMMDEV_EVENT_BALLOON_CHANGE_REQUEST indicates that
+ * the request is a response to that event.
+ * (Don't confuse this with VMMDEVREQ_ACKNOWLEDGE_EVENTS.)
+ */
+ u32 event_ack;
+};
+VMMDEV_ASSERT_SIZE(vmmdev_memballoon_info, 24 + 12);
+
+/** struct vmmdev_memballoon_change - Change the size of the balloon. */
+struct vmmdev_memballoon_change {
+ /** Header. */
+ struct vmmdev_request_header header;
+ /** The number of pages in the array. */
+ u32 pages;
+ /** true = inflate, false = deflate. */
+ u32 inflate;
+ /** Physical address (u64) of each page. */
+ u64 phys_page[VMMDEV_MEMORY_BALLOON_CHUNK_PAGES];
+};
+
+/** struct vmmdev_write_core_dump - Write Core Dump request data. */
+struct vmmdev_write_core_dump {
+ /** Header. */
+ struct vmmdev_request_header header;
+ /** Flags (reserved, MBZ). */
+ u32 flags;
+};
+VMMDEV_ASSERT_SIZE(vmmdev_write_core_dump, 24 + 4);
+
+/** struct vmmdev_heartbeat - Heart beat check state structure. */
+struct vmmdev_heartbeat {
+ /** Header. */
+ struct vmmdev_request_header header;
+	/** OUT: Guest heartbeat interval in nanoseconds. */
+ u64 interval_ns;
+ /** Heartbeat check flag. */
+ u8 enabled;
+ /** Explicit padding, MBZ. */
+ u8 padding[3];
+} __packed;
+VMMDEV_ASSERT_SIZE(vmmdev_heartbeat, 24 + 12);
+
+#define VMMDEV_HGCM_REQ_DONE BIT(0)
+#define VMMDEV_HGCM_REQ_CANCELLED BIT(1)
+
+/** struct vmmdev_hgcmreq_header - vmmdev HGCM requests header. */
+struct vmmdev_hgcmreq_header {
+ /** Request header. */
+ struct vmmdev_request_header header;
+
+ /** HGCM flags. */
+ u32 flags;
+
+ /** Result code. */
+ s32 result;
+};
+VMMDEV_ASSERT_SIZE(vmmdev_hgcmreq_header, 24 + 8);
+
+/** struct vmmdev_hgcm_connect - HGCM connect request structure. */
+struct vmmdev_hgcm_connect {
+ /** HGCM request header. */
+ struct vmmdev_hgcmreq_header header;
+
+ /** IN: Description of service to connect to. */
+ struct vmmdev_hgcm_service_location loc;
+
+ /** OUT: Client identifier assigned by local instance of HGCM. */
+ u32 client_id;
+};
+VMMDEV_ASSERT_SIZE(vmmdev_hgcm_connect, 32 + 132 + 4);
+
+/** struct vmmdev_hgcm_disconnect - HGCM disconnect request structure. */
+struct vmmdev_hgcm_disconnect {
+ /** HGCM request header. */
+ struct vmmdev_hgcmreq_header header;
+
+ /** IN: Client identifier. */
+ u32 client_id;
+};
+VMMDEV_ASSERT_SIZE(vmmdev_hgcm_disconnect, 32 + 4);
+
+#define VMMDEV_HGCM_MAX_PARMS 32
+
+/** struct vmmdev_hgcm_call - HGCM call request structure. */
+struct vmmdev_hgcm_call {
+	/** HGCM request header. */
+ struct vmmdev_hgcmreq_header header;
+
+ /** IN: Client identifier. */
+ u32 client_id;
+ /** IN: Service function number. */
+ u32 function;
+ /** IN: Number of parameters. */
+ u32 parm_count;
+ /** Parameters follow in form: HGCMFunctionParameter32|64 parms[X]; */
+};
+VMMDEV_ASSERT_SIZE(vmmdev_hgcm_call, 32 + 12);
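The parameter array is not a named member; it follows the fixed part
directly in memory, which is why vbg_hgcm_call() earlier in this patch
sizes its allocation as sizeof(struct vmmdev_hgcm_call) plus parm_count
parameters. A sketch of how an accessor in the style of the
VMMDEV_HGCM_CALL_PARMS() macro used there can be derived (the real macro
is provided by the vbox_vmmdev_types.h UAPI header included above):

	#define HGCM_CALL_PARMS(c)				\
		((struct vmmdev_hgcm_function_parameter *)	\
		 ((u8 *)(c) + sizeof(struct vmmdev_hgcm_call)))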
+
+/**
+ * struct vmmdev_hgcm_cancel2 - HGCM cancel request structure, version 2.
+ *
+ * After the request completes, header.rc will be:
+ *
+ * VINF_SUCCESS when cancelled.
+ * VERR_NOT_FOUND if the specified request cannot be found.
+ * VERR_INVALID_PARAMETER if the address is invalid.
+ */
+struct vmmdev_hgcm_cancel2 {
+ /** Header. */
+ struct vmmdev_request_header header;
+ /** The physical address of the request to cancel. */
+ u32 phys_req_to_cancel;
+};
+VMMDEV_ASSERT_SIZE(vmmdev_hgcm_cancel2, 24 + 4);
+
+#endif
diff --git a/drivers/vme/vme.c b/drivers/vme/vme.c
index 81246221a13b..92500f6bdad1 100644
--- a/drivers/vme/vme.c
+++ b/drivers/vme/vme.c
@@ -1290,7 +1290,7 @@ struct vme_error_handler *vme_register_error_handler(
{
struct vme_error_handler *handler;
- handler = kmalloc(sizeof(*handler), GFP_KERNEL);
+ handler = kmalloc(sizeof(*handler), GFP_ATOMIC);
if (!handler)
return NULL;