Diffstat (limited to 'drivers/firmware')
-rw-r--r--  drivers/firmware/Kconfig | 12
-rw-r--r--  drivers/firmware/Makefile | 2
-rw-r--r--  drivers/firmware/arm_ffa/bus.c | 14
-rw-r--r--  drivers/firmware/arm_ffa/driver.c | 532
-rw-r--r--  drivers/firmware/arm_scmi/bus.c | 69
-rw-r--r--  drivers/firmware/arm_scmi/driver.c | 10
-rw-r--r--  drivers/firmware/cirrus/cs_dsp.c | 2
-rw-r--r--  drivers/firmware/cirrus/test/cs_dsp_mock_mem_maps.c | 30
-rw-r--r--  drivers/firmware/cirrus/test/cs_dsp_test_bin.c | 2
-rw-r--r--  drivers/firmware/cirrus/test/cs_dsp_test_bin_error.c | 15
-rw-r--r--  drivers/firmware/cirrus/test/cs_dsp_test_control_parse.c | 51
-rw-r--r--  drivers/firmware/dmi-sysfs.c | 28
-rw-r--r--  drivers/firmware/dmi_scan.c | 4
-rw-r--r--  drivers/firmware/efi/cper.c | 6
-rw-r--r--  drivers/firmware/efi/cper_cxl.c | 39
-rw-r--r--  drivers/firmware/efi/cper_cxl.h | 66
-rw-r--r--  drivers/firmware/efi/efibc.c | 2
-rw-r--r--  drivers/firmware/efi/libstub/Makefile | 10
-rw-r--r--  drivers/firmware/efi/libstub/efistub.h | 3
-rw-r--r--  drivers/firmware/efi/libstub/intrinsics.c | 26
-rw-r--r--  drivers/firmware/efi/libstub/randomalloc.c | 4
-rw-r--r--  drivers/firmware/efi/libstub/x86-mixed.S | 253
-rw-r--r--  drivers/firmware/efi/libstub/x86-stub.c | 52
-rw-r--r--  drivers/firmware/efi/libstub/zboot-decompress-gzip.c | 68
-rw-r--r--  drivers/firmware/efi/libstub/zboot-decompress-zstd.c | 49
-rw-r--r--  drivers/firmware/efi/libstub/zboot.c | 65
-rw-r--r--  drivers/firmware/efi/libstub/zboot.lds | 1
-rw-r--r--  drivers/firmware/efi/mokvar-table.c | 4
-rw-r--r--  drivers/firmware/efi/rci2-table.c | 2
-rw-r--r--  drivers/firmware/imx/imx-scu.c | 1
-rw-r--r--  drivers/firmware/psci/psci_checker.c | 2
-rw-r--r--  drivers/firmware/qcom/qcom_qseecom_uefisecapp.c | 18
-rw-r--r--  drivers/firmware/qcom/qcom_scm.c | 4
-rw-r--r--  drivers/firmware/qemu_fw_cfg.c | 6
-rw-r--r--  drivers/firmware/samsung/Kconfig | 14
-rw-r--r--  drivers/firmware/samsung/Makefile | 4
-rw-r--r--  drivers/firmware/samsung/exynos-acpm-pmic.c | 224
-rw-r--r--  drivers/firmware/samsung/exynos-acpm-pmic.h | 29
-rw-r--r--  drivers/firmware/samsung/exynos-acpm.c | 769
-rw-r--r--  drivers/firmware/samsung/exynos-acpm.h | 23
-rw-r--r--  drivers/firmware/smccc/kvm_guest.c | 66
-rw-r--r--  drivers/firmware/smccc/soc_id.c | 80
-rw-r--r--  drivers/firmware/stratix10-svc.c | 14
-rw-r--r--  drivers/firmware/thead,th1520-aon.c | 250
-rw-r--r--  drivers/firmware/xilinx/zynqmp.c | 6
45 files changed, 2460 insertions, 471 deletions
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index 9f35f69e0f9e..aadc395ee168 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -212,9 +212,20 @@ config SYSFB_SIMPLEFB
If unsure, say Y.
+config TH1520_AON_PROTOCOL
+ tristate "Always-On firmware protocol"
+ depends on ARCH_THEAD || COMPILE_TEST
+ depends on MAILBOX
+ help
+ Power, clock, and resource management capabilities on the TH1520 SoC are
+ managed by the E902 core. Firmware running on this core communicates with
+ the kernel through the Always-On protocol, using hardware mailbox as a medium.
+ Say yes if you need such capabilities.
+
config TI_SCI_PROTOCOL
tristate "TI System Control Interface (TISCI) Message Protocol"
depends on TI_MESSAGE_MANAGER
+ default ARCH_K3
help
TI System Control Interface (TISCI) Message Protocol is used to manage
compute systems such as ARM, DSP etc with the system controller in
@@ -267,6 +278,7 @@ source "drivers/firmware/meson/Kconfig"
source "drivers/firmware/microchip/Kconfig"
source "drivers/firmware/psci/Kconfig"
source "drivers/firmware/qcom/Kconfig"
+source "drivers/firmware/samsung/Kconfig"
source "drivers/firmware/smccc/Kconfig"
source "drivers/firmware/tegra/Kconfig"
source "drivers/firmware/xilinx/Kconfig"
diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile
index 7a8d486e718f..4ddec2820c96 100644
--- a/drivers/firmware/Makefile
+++ b/drivers/firmware/Makefile
@@ -18,6 +18,7 @@ obj-$(CONFIG_RASPBERRYPI_FIRMWARE) += raspberrypi.o
obj-$(CONFIG_FW_CFG_SYSFS) += qemu_fw_cfg.o
obj-$(CONFIG_SYSFB) += sysfb.o
obj-$(CONFIG_SYSFB_SIMPLEFB) += sysfb_simplefb.o
+obj-$(CONFIG_TH1520_AON_PROTOCOL) += thead,th1520-aon.o
obj-$(CONFIG_TI_SCI_PROTOCOL) += ti_sci.o
obj-$(CONFIG_TRUSTED_FOUNDATIONS) += trusted_foundations.o
obj-$(CONFIG_TURRIS_MOX_RWTM) += turris-mox-rwtm.o
@@ -33,6 +34,7 @@ obj-y += efi/
obj-y += imx/
obj-y += psci/
obj-y += qcom/
+obj-y += samsung/
obj-y += smccc/
obj-y += tegra/
obj-y += xilinx/
diff --git a/drivers/firmware/arm_ffa/bus.c b/drivers/firmware/arm_ffa/bus.c
index dfda5ffc14db..50bfe56c755e 100644
--- a/drivers/firmware/arm_ffa/bus.c
+++ b/drivers/firmware/arm_ffa/bus.c
@@ -15,7 +15,7 @@
#include "common.h"
-#define SCMI_UEVENT_MODALIAS_FMT "arm_ffa:%04x:%pUb"
+#define FFA_UEVENT_MODALIAS_FMT "arm_ffa:%04x:%pUb"
static DEFINE_IDA(ffa_bus_id);
@@ -68,7 +68,7 @@ static int ffa_device_uevent(const struct device *dev, struct kobj_uevent_env *e
{
const struct ffa_device *ffa_dev = to_ffa_dev(dev);
- return add_uevent_var(env, "MODALIAS=" SCMI_UEVENT_MODALIAS_FMT,
+ return add_uevent_var(env, "MODALIAS=" FFA_UEVENT_MODALIAS_FMT,
ffa_dev->vm_id, &ffa_dev->uuid);
}
@@ -77,7 +77,7 @@ static ssize_t modalias_show(struct device *dev,
{
struct ffa_device *ffa_dev = to_ffa_dev(dev);
- return sysfs_emit(buf, SCMI_UEVENT_MODALIAS_FMT, ffa_dev->vm_id,
+ return sysfs_emit(buf, FFA_UEVENT_MODALIAS_FMT, ffa_dev->vm_id,
&ffa_dev->uuid);
}
static DEVICE_ATTR_RO(modalias);
@@ -160,11 +160,12 @@ static int __ffa_devices_unregister(struct device *dev, void *data)
return 0;
}
-static void ffa_devices_unregister(void)
+void ffa_devices_unregister(void)
{
bus_for_each_dev(&ffa_bus_type, NULL, NULL,
__ffa_devices_unregister);
}
+EXPORT_SYMBOL_GPL(ffa_devices_unregister);
bool ffa_device_is_valid(struct ffa_device *ffa_dev)
{
@@ -192,7 +193,6 @@ ffa_device_register(const struct ffa_partition_info *part_info,
const struct ffa_ops *ops)
{
int id, ret;
- uuid_t uuid;
struct device *dev;
struct ffa_device *ffa_dev;
@@ -212,14 +212,14 @@ ffa_device_register(const struct ffa_partition_info *part_info,
dev = &ffa_dev->dev;
dev->bus = &ffa_bus_type;
dev->release = ffa_release_device;
+ dev->dma_mask = &dev->coherent_dma_mask;
dev_set_name(&ffa_dev->dev, "arm-ffa-%d", id);
ffa_dev->id = id;
ffa_dev->vm_id = part_info->id;
ffa_dev->properties = part_info->properties;
ffa_dev->ops = ops;
- import_uuid(&uuid, (u8 *)part_info->uuid);
- uuid_copy(&ffa_dev->uuid, &uuid);
+ uuid_copy(&ffa_dev->uuid, &part_info->uuid);
ret = device_register(&ffa_dev->dev);
if (ret) {
diff --git a/drivers/firmware/arm_ffa/driver.c b/drivers/firmware/arm_ffa/driver.c
index 2c2ec3c35f15..19295282de24 100644
--- a/drivers/firmware/arm_ffa/driver.c
+++ b/drivers/firmware/arm_ffa/driver.c
@@ -44,7 +44,7 @@
#include "common.h"
-#define FFA_DRIVER_VERSION FFA_VERSION_1_1
+#define FFA_DRIVER_VERSION FFA_VERSION_1_2
#define FFA_MIN_VERSION FFA_VERSION_1_0
#define SENDER_ID_MASK GENMASK(31, 16)
@@ -114,7 +114,6 @@ struct ffa_drv_info {
};
static struct ffa_drv_info *drv_info;
-static void ffa_partitions_cleanup(void);
/*
* The driver must be able to support all the versions from the earliest
@@ -145,11 +144,19 @@ static int ffa_version_check(u32 *version)
.a0 = FFA_VERSION, .a1 = FFA_DRIVER_VERSION,
}, &ver);
- if (ver.a0 == FFA_RET_NOT_SUPPORTED) {
+ if ((s32)ver.a0 == FFA_RET_NOT_SUPPORTED) {
pr_info("FFA_VERSION returned not supported\n");
return -EOPNOTSUPP;
}
+ if (FFA_MAJOR_VERSION(ver.a0) > FFA_MAJOR_VERSION(FFA_DRIVER_VERSION)) {
+ pr_err("Incompatible v%d.%d! Latest supported v%d.%d\n",
+ FFA_MAJOR_VERSION(ver.a0), FFA_MINOR_VERSION(ver.a0),
+ FFA_MAJOR_VERSION(FFA_DRIVER_VERSION),
+ FFA_MINOR_VERSION(FFA_DRIVER_VERSION));
+ return -EINVAL;
+ }
+
if (ver.a0 < FFA_MIN_VERSION) {
pr_err("Incompatible v%d.%d! Earliest supported v%d.%d\n",
FFA_MAJOR_VERSION(ver.a0), FFA_MINOR_VERSION(ver.a0),
@@ -276,9 +283,21 @@ __ffa_partition_info_get(u32 uuid0, u32 uuid1, u32 uuid2, u32 uuid3,
}
if (buffer && count <= num_partitions)
- for (idx = 0; idx < count; idx++)
- memcpy(buffer + idx, drv_info->rx_buffer + idx * sz,
- buf_sz);
+ for (idx = 0; idx < count; idx++) {
+ struct ffa_partition_info_le {
+ __le16 id;
+ __le16 exec_ctxt;
+ __le32 properties;
+ uuid_t uuid;
+ } *rx_buf = drv_info->rx_buffer + idx * sz;
+ struct ffa_partition_info *buf = buffer + idx;
+
+ buf->id = le16_to_cpu(rx_buf->id);
+ buf->exec_ctxt = le16_to_cpu(rx_buf->exec_ctxt);
+ buf->properties = le32_to_cpu(rx_buf->properties);
+ if (buf_sz > 8)
+ import_uuid(&buf->uuid, (u8 *)&rx_buf->uuid);
+ }
ffa_rx_release();
@@ -295,14 +314,24 @@ __ffa_partition_info_get(u32 uuid0, u32 uuid1, u32 uuid2, u32 uuid3,
#define CURRENT_INDEX(x) ((u16)(FIELD_GET(CURRENT_INDEX_MASK, (x))))
#define UUID_INFO_TAG(x) ((u16)(FIELD_GET(UUID_INFO_TAG_MASK, (x))))
#define PARTITION_INFO_SZ(x) ((u16)(FIELD_GET(PARTITION_INFO_SZ_MASK, (x))))
+#define PART_INFO_ID_MASK GENMASK(15, 0)
+#define PART_INFO_EXEC_CXT_MASK GENMASK(31, 16)
+#define PART_INFO_PROPS_MASK GENMASK(63, 32)
+#define PART_INFO_ID(x) ((u16)(FIELD_GET(PART_INFO_ID_MASK, (x))))
+#define PART_INFO_EXEC_CXT(x) ((u16)(FIELD_GET(PART_INFO_EXEC_CXT_MASK, (x))))
+#define PART_INFO_PROPERTIES(x) ((u32)(FIELD_GET(PART_INFO_PROPS_MASK, (x))))
static int
__ffa_partition_info_get_regs(u32 uuid0, u32 uuid1, u32 uuid2, u32 uuid3,
struct ffa_partition_info *buffer, int num_parts)
{
u16 buf_sz, start_idx, cur_idx, count = 0, prev_idx = 0, tag = 0;
+ struct ffa_partition_info *buf = buffer;
ffa_value_t partition_info;
do {
+ __le64 *regs;
+ int idx;
+
start_idx = prev_idx ? prev_idx + 1 : 0;
invoke_ffa_fn((ffa_value_t){
@@ -326,8 +355,25 @@ __ffa_partition_info_get_regs(u32 uuid0, u32 uuid1, u32 uuid2, u32 uuid3,
if (buf_sz > sizeof(*buffer))
buf_sz = sizeof(*buffer);
- memcpy(buffer + prev_idx * buf_sz, &partition_info.a3,
- (cur_idx - start_idx + 1) * buf_sz);
+ regs = (void *)&partition_info.a3;
+ for (idx = 0; idx < cur_idx - start_idx + 1; idx++, buf++) {
+ union {
+ uuid_t uuid;
+ u64 regs[2];
+ } uuid_regs = {
+ .regs = {
+ le64_to_cpu(*(regs + 1)),
+ le64_to_cpu(*(regs + 2)),
+ }
+ };
+ u64 val = *(u64 *)regs;
+
+ buf->id = PART_INFO_ID(val);
+ buf->exec_ctxt = PART_INFO_EXEC_CXT(val);
+ buf->properties = PART_INFO_PROPERTIES(val);
+ uuid_copy(&buf->uuid, &uuid_regs.uuid);
+ regs += 3;
+ }
prev_idx = cur_idx;
} while (cur_idx < (count - 1));
@@ -445,9 +491,9 @@ static int ffa_msg_send_direct_req(u16 src_id, u16 dst_id, bool mode_32bit,
return -EINVAL;
}
-static int ffa_msg_send2(u16 src_id, u16 dst_id, void *buf, size_t sz)
+static int ffa_msg_send2(struct ffa_device *dev, u16 src_id, void *buf, size_t sz)
{
- u32 src_dst_ids = PACK_TARGET_INFO(src_id, dst_id);
+ u32 src_dst_ids = PACK_TARGET_INFO(src_id, dev->vm_id);
struct ffa_indirect_msg_hdr *msg;
ffa_value_t ret;
int retval = 0;
@@ -463,6 +509,7 @@ static int ffa_msg_send2(u16 src_id, u16 dst_id, void *buf, size_t sz)
msg->offset = sizeof(*msg);
msg->send_recv_id = src_dst_ids;
msg->size = sz;
+ uuid_copy(&msg->uuid, &dev->uuid);
memcpy((u8 *)msg + msg->offset, buf, sz);
/* flags = 0, sender VMID = 0 works for both physical/virtual NS */
@@ -760,6 +807,13 @@ static int ffa_notification_bitmap_destroy(void)
return 0;
}
+enum notify_type {
+ SECURE_PARTITION,
+ NON_SECURE_VM,
+ SPM_FRAMEWORK,
+ NS_HYP_FRAMEWORK,
+};
+
#define NOTIFICATION_LOW_MASK GENMASK(31, 0)
#define NOTIFICATION_HIGH_MASK GENMASK(63, 32)
#define NOTIFICATION_BITMAP_HIGH(x) \
@@ -783,10 +837,22 @@ static int ffa_notification_bitmap_destroy(void)
#define MAX_IDS_32 10
#define PER_VCPU_NOTIFICATION_FLAG BIT(0)
-#define SECURE_PARTITION_BITMAP BIT(0)
-#define NON_SECURE_VM_BITMAP BIT(1)
-#define SPM_FRAMEWORK_BITMAP BIT(2)
-#define NS_HYP_FRAMEWORK_BITMAP BIT(3)
+#define SECURE_PARTITION_BITMAP_ENABLE BIT(SECURE_PARTITION)
+#define NON_SECURE_VM_BITMAP_ENABLE BIT(NON_SECURE_VM)
+#define SPM_FRAMEWORK_BITMAP_ENABLE BIT(SPM_FRAMEWORK)
+#define NS_HYP_FRAMEWORK_BITMAP_ENABLE BIT(NS_HYP_FRAMEWORK)
+#define FFA_BITMAP_SECURE_ENABLE_MASK \
+ (SECURE_PARTITION_BITMAP_ENABLE | SPM_FRAMEWORK_BITMAP_ENABLE)
+#define FFA_BITMAP_NS_ENABLE_MASK \
+ (NON_SECURE_VM_BITMAP_ENABLE | NS_HYP_FRAMEWORK_BITMAP_ENABLE)
+#define FFA_BITMAP_ALL_ENABLE_MASK \
+ (FFA_BITMAP_SECURE_ENABLE_MASK | FFA_BITMAP_NS_ENABLE_MASK)
+
+#define FFA_SECURE_PARTITION_ID_FLAG BIT(15)
+
+#define SPM_FRAMEWORK_BITMAP(x) NOTIFICATION_BITMAP_LOW(x)
+#define NS_HYP_FRAMEWORK_BITMAP(x) NOTIFICATION_BITMAP_HIGH(x)
+#define FRAMEWORK_NOTIFY_RX_BUFFER_FULL BIT(0)
static int ffa_notification_bind_common(u16 dst_id, u64 bitmap,
u32 flags, bool is_bind)
@@ -852,9 +918,15 @@ static int ffa_notification_get(u32 flags, struct ffa_notify_bitmaps *notify)
else if (ret.a0 != FFA_SUCCESS)
return -EINVAL; /* Something else went wrong. */
- notify->sp_map = PACK_NOTIFICATION_BITMAP(ret.a2, ret.a3);
- notify->vm_map = PACK_NOTIFICATION_BITMAP(ret.a4, ret.a5);
- notify->arch_map = PACK_NOTIFICATION_BITMAP(ret.a6, ret.a7);
+ if (flags & SECURE_PARTITION_BITMAP_ENABLE)
+ notify->sp_map = PACK_NOTIFICATION_BITMAP(ret.a2, ret.a3);
+ if (flags & NON_SECURE_VM_BITMAP_ENABLE)
+ notify->vm_map = PACK_NOTIFICATION_BITMAP(ret.a4, ret.a5);
+ if (flags & SPM_FRAMEWORK_BITMAP_ENABLE)
+ notify->arch_map = SPM_FRAMEWORK_BITMAP(ret.a6);
+ if (flags & NS_HYP_FRAMEWORK_BITMAP_ENABLE)
+ notify->arch_map = PACK_NOTIFICATION_BITMAP(notify->arch_map,
+ ret.a7);
return 0;
}
@@ -863,27 +935,32 @@ struct ffa_dev_part_info {
ffa_sched_recv_cb callback;
void *cb_data;
rwlock_t rw_lock;
+ struct ffa_device *dev;
+ struct list_head node;
};
static void __do_sched_recv_cb(u16 part_id, u16 vcpu, bool is_per_vcpu)
{
- struct ffa_dev_part_info *partition;
+ struct ffa_dev_part_info *partition = NULL, *tmp;
ffa_sched_recv_cb callback;
+ struct list_head *phead;
void *cb_data;
- partition = xa_load(&drv_info->partition_info, part_id);
- if (!partition) {
+ phead = xa_load(&drv_info->partition_info, part_id);
+ if (!phead) {
pr_err("%s: Invalid partition ID 0x%x\n", __func__, part_id);
return;
}
- read_lock(&partition->rw_lock);
- callback = partition->callback;
- cb_data = partition->cb_data;
- read_unlock(&partition->rw_lock);
+ list_for_each_entry_safe(partition, tmp, phead, node) {
+ read_lock(&partition->rw_lock);
+ callback = partition->callback;
+ cb_data = partition->cb_data;
+ read_unlock(&partition->rw_lock);
- if (callback)
- callback(vcpu, is_per_vcpu, cb_data);
+ if (callback)
+ callback(vcpu, is_per_vcpu, cb_data);
+ }
}
static void ffa_notification_info_get(void)
@@ -899,7 +976,7 @@ static void ffa_notification_info_get(void)
}, &ret);
if (ret.a0 != FFA_FN_NATIVE(SUCCESS) && ret.a0 != FFA_SUCCESS) {
- if (ret.a2 != FFA_RET_NO_DATA)
+ if ((s32)ret.a2 != FFA_RET_NO_DATA)
pr_err("Notification Info fetch failed: 0x%lx (0x%lx)",
ret.a0, ret.a2);
return;
@@ -935,7 +1012,7 @@ static void ffa_notification_info_get(void)
}
/* Per vCPU Notification */
- for (idx = 0; idx < ids_count[list]; idx++) {
+ for (idx = 1; idx < ids_count[list]; idx++) {
if (ids_processed >= max_ids - 1)
break;
@@ -1015,17 +1092,17 @@ static int ffa_sync_send_receive(struct ffa_device *dev,
static int ffa_indirect_msg_send(struct ffa_device *dev, void *buf, size_t sz)
{
- return ffa_msg_send2(drv_info->vm_id, dev->vm_id, buf, sz);
+ return ffa_msg_send2(dev, drv_info->vm_id, buf, sz);
}
-static int ffa_sync_send_receive2(struct ffa_device *dev, const uuid_t *uuid,
+static int ffa_sync_send_receive2(struct ffa_device *dev,
struct ffa_send_direct_data2 *data)
{
if (!drv_info->msg_direct_req2_supp)
return -EOPNOTSUPP;
return ffa_msg_send_direct_req2(drv_info->vm_id, dev->vm_id,
- uuid, data);
+ &dev->uuid, data);
}
static int ffa_memory_share(struct ffa_mem_ops_args *args)
@@ -1051,35 +1128,39 @@ static int ffa_memory_lend(struct ffa_mem_ops_args *args)
return ffa_memory_ops(FFA_MEM_LEND, args);
}
-#define FFA_SECURE_PARTITION_ID_FLAG BIT(15)
-
#define ffa_notifications_disabled() (!drv_info->notif_enabled)
-enum notify_type {
- NON_SECURE_VM,
- SECURE_PARTITION,
- FRAMEWORK,
-};
-
struct notifier_cb_info {
struct hlist_node hnode;
+ struct ffa_device *dev;
+ ffa_fwk_notifier_cb fwk_cb;
ffa_notifier_cb cb;
void *cb_data;
- enum notify_type type;
};
-static int ffa_sched_recv_cb_update(u16 part_id, ffa_sched_recv_cb callback,
- void *cb_data, bool is_registration)
+static int
+ffa_sched_recv_cb_update(struct ffa_device *dev, ffa_sched_recv_cb callback,
+ void *cb_data, bool is_registration)
{
- struct ffa_dev_part_info *partition;
+ struct ffa_dev_part_info *partition = NULL, *tmp;
+ struct list_head *phead;
bool cb_valid;
if (ffa_notifications_disabled())
return -EOPNOTSUPP;
- partition = xa_load(&drv_info->partition_info, part_id);
+ phead = xa_load(&drv_info->partition_info, dev->vm_id);
+ if (!phead) {
+ pr_err("%s: Invalid partition ID 0x%x\n", __func__, dev->vm_id);
+ return -EINVAL;
+ }
+
+ list_for_each_entry_safe(partition, tmp, phead, node)
+ if (partition->dev == dev)
+ break;
+
if (!partition) {
- pr_err("%s: Invalid partition ID 0x%x\n", __func__, part_id);
+ pr_err("%s: No such partition ID 0x%x\n", __func__, dev->vm_id);
return -EINVAL;
}
@@ -1101,12 +1182,12 @@ static int ffa_sched_recv_cb_update(u16 part_id, ffa_sched_recv_cb callback,
static int ffa_sched_recv_cb_register(struct ffa_device *dev,
ffa_sched_recv_cb cb, void *cb_data)
{
- return ffa_sched_recv_cb_update(dev->vm_id, cb, cb_data, true);
+ return ffa_sched_recv_cb_update(dev, cb, cb_data, true);
}
static int ffa_sched_recv_cb_unregister(struct ffa_device *dev)
{
- return ffa_sched_recv_cb_update(dev->vm_id, NULL, NULL, false);
+ return ffa_sched_recv_cb_update(dev, NULL, NULL, false);
}
static int ffa_notification_bind(u16 dst_id, u64 bitmap, u32 flags)
@@ -1119,27 +1200,69 @@ static int ffa_notification_unbind(u16 dst_id, u64 bitmap)
return ffa_notification_bind_common(dst_id, bitmap, 0, false);
}
-/* Should be called while the notify_lock is taken */
+static enum notify_type ffa_notify_type_get(u16 vm_id)
+{
+ if (vm_id & FFA_SECURE_PARTITION_ID_FLAG)
+ return SECURE_PARTITION;
+ else
+ return NON_SECURE_VM;
+}
+
+/* notifier_hnode_get* should be called with notify_lock held */
static struct notifier_cb_info *
-notifier_hash_node_get(u16 notify_id, enum notify_type type)
+notifier_hnode_get_by_vmid(u16 notify_id, int vmid)
{
struct notifier_cb_info *node;
hash_for_each_possible(drv_info->notifier_hash, node, hnode, notify_id)
- if (type == node->type)
+ if (node->fwk_cb && vmid == node->dev->vm_id)
+ return node;
+
+ return NULL;
+}
+
+static struct notifier_cb_info *
+notifier_hnode_get_by_vmid_uuid(u16 notify_id, int vmid, const uuid_t *uuid)
+{
+ struct notifier_cb_info *node;
+
+ if (uuid_is_null(uuid))
+ return notifier_hnode_get_by_vmid(notify_id, vmid);
+
+ hash_for_each_possible(drv_info->notifier_hash, node, hnode, notify_id)
+ if (node->fwk_cb && vmid == node->dev->vm_id &&
+ uuid_equal(&node->dev->uuid, uuid))
+ return node;
+
+ return NULL;
+}
+
+static struct notifier_cb_info *
+notifier_hnode_get_by_type(u16 notify_id, enum notify_type type)
+{
+ struct notifier_cb_info *node;
+
+ hash_for_each_possible(drv_info->notifier_hash, node, hnode, notify_id)
+ if (node->cb && type == ffa_notify_type_get(node->dev->vm_id))
return node;
return NULL;
}
static int
-update_notifier_cb(int notify_id, enum notify_type type, ffa_notifier_cb cb,
- void *cb_data, bool is_registration)
+update_notifier_cb(struct ffa_device *dev, int notify_id, void *cb,
+ void *cb_data, bool is_registration, bool is_framework)
{
struct notifier_cb_info *cb_info = NULL;
+ enum notify_type type = ffa_notify_type_get(dev->vm_id);
bool cb_found;
- cb_info = notifier_hash_node_get(notify_id, type);
+ if (is_framework)
+ cb_info = notifier_hnode_get_by_vmid_uuid(notify_id, dev->vm_id,
+ &dev->uuid);
+ else
+ cb_info = notifier_hnode_get_by_type(notify_id, type);
+
cb_found = !!cb_info;
if (!(is_registration ^ cb_found))
@@ -1150,9 +1273,12 @@ update_notifier_cb(int notify_id, enum notify_type type, ffa_notifier_cb cb,
if (!cb_info)
return -ENOMEM;
- cb_info->type = type;
- cb_info->cb = cb;
+ cb_info->dev = dev;
cb_info->cb_data = cb_data;
+ if (is_framework)
+ cb_info->fwk_cb = cb;
+ else
+ cb_info->cb = cb;
hash_add(drv_info->notifier_hash, &cb_info->hnode, notify_id);
} else {
@@ -1162,18 +1288,10 @@ update_notifier_cb(int notify_id, enum notify_type type, ffa_notifier_cb cb,
return 0;
}
-static enum notify_type ffa_notify_type_get(u16 vm_id)
-{
- if (vm_id & FFA_SECURE_PARTITION_ID_FLAG)
- return SECURE_PARTITION;
- else
- return NON_SECURE_VM;
-}
-
-static int ffa_notify_relinquish(struct ffa_device *dev, int notify_id)
+static int __ffa_notify_relinquish(struct ffa_device *dev, int notify_id,
+ bool is_framework)
{
int rc;
- enum notify_type type = ffa_notify_type_get(dev->vm_id);
if (ffa_notifications_disabled())
return -EOPNOTSUPP;
@@ -1183,26 +1301,38 @@ static int ffa_notify_relinquish(struct ffa_device *dev, int notify_id)
mutex_lock(&drv_info->notify_lock);
- rc = update_notifier_cb(notify_id, type, NULL, NULL, false);
+ rc = update_notifier_cb(dev, notify_id, NULL, NULL, false,
+ is_framework);
if (rc) {
pr_err("Could not unregister notification callback\n");
mutex_unlock(&drv_info->notify_lock);
return rc;
}
- rc = ffa_notification_unbind(dev->vm_id, BIT(notify_id));
+ if (!is_framework)
+ rc = ffa_notification_unbind(dev->vm_id, BIT(notify_id));
mutex_unlock(&drv_info->notify_lock);
return rc;
}
-static int ffa_notify_request(struct ffa_device *dev, bool is_per_vcpu,
- ffa_notifier_cb cb, void *cb_data, int notify_id)
+static int ffa_notify_relinquish(struct ffa_device *dev, int notify_id)
+{
+ return __ffa_notify_relinquish(dev, notify_id, false);
+}
+
+static int ffa_fwk_notify_relinquish(struct ffa_device *dev, int notify_id)
+{
+ return __ffa_notify_relinquish(dev, notify_id, true);
+}
+
+static int __ffa_notify_request(struct ffa_device *dev, bool is_per_vcpu,
+ void *cb, void *cb_data,
+ int notify_id, bool is_framework)
{
int rc;
u32 flags = 0;
- enum notify_type type = ffa_notify_type_get(dev->vm_id);
if (ffa_notifications_disabled())
return -EOPNOTSUPP;
@@ -1212,26 +1342,44 @@ static int ffa_notify_request(struct ffa_device *dev, bool is_per_vcpu,
mutex_lock(&drv_info->notify_lock);
- if (is_per_vcpu)
- flags = PER_VCPU_NOTIFICATION_FLAG;
+ if (!is_framework) {
+ if (is_per_vcpu)
+ flags = PER_VCPU_NOTIFICATION_FLAG;
- rc = ffa_notification_bind(dev->vm_id, BIT(notify_id), flags);
- if (rc) {
- mutex_unlock(&drv_info->notify_lock);
- return rc;
+ rc = ffa_notification_bind(dev->vm_id, BIT(notify_id), flags);
+ if (rc) {
+ mutex_unlock(&drv_info->notify_lock);
+ return rc;
+ }
}
- rc = update_notifier_cb(notify_id, type, cb, cb_data, true);
+ rc = update_notifier_cb(dev, notify_id, cb, cb_data, true,
+ is_framework);
if (rc) {
pr_err("Failed to register callback for %d - %d\n",
notify_id, rc);
- ffa_notification_unbind(dev->vm_id, BIT(notify_id));
+ if (!is_framework)
+ ffa_notification_unbind(dev->vm_id, BIT(notify_id));
}
mutex_unlock(&drv_info->notify_lock);
return rc;
}
+static int ffa_notify_request(struct ffa_device *dev, bool is_per_vcpu,
+ ffa_notifier_cb cb, void *cb_data, int notify_id)
+{
+ return __ffa_notify_request(dev, is_per_vcpu, cb, cb_data, notify_id,
+ false);
+}
+
+static int
+ffa_fwk_notify_request(struct ffa_device *dev, ffa_fwk_notifier_cb cb,
+ void *cb_data, int notify_id)
+{
+ return __ffa_notify_request(dev, false, cb, cb_data, notify_id, true);
+}
+
static int ffa_notify_send(struct ffa_device *dev, int notify_id,
bool is_per_vcpu, u16 vcpu)
{
@@ -1258,7 +1406,7 @@ static void handle_notif_callbacks(u64 bitmap, enum notify_type type)
continue;
mutex_lock(&drv_info->notify_lock);
- cb_info = notifier_hash_node_get(notify_id, type);
+ cb_info = notifier_hnode_get_by_type(notify_id, type);
mutex_unlock(&drv_info->notify_lock);
if (cb_info && cb_info->cb)
@@ -1266,21 +1414,68 @@ static void handle_notif_callbacks(u64 bitmap, enum notify_type type)
}
}
-static void notif_get_and_handle(void *unused)
+static void handle_fwk_notif_callbacks(u32 bitmap)
+{
+ void *buf;
+ uuid_t uuid;
+ int notify_id = 0, target;
+ struct ffa_indirect_msg_hdr *msg;
+ struct notifier_cb_info *cb_info = NULL;
+
+ /* Only one framework notification defined and supported for now */
+ if (!(bitmap & FRAMEWORK_NOTIFY_RX_BUFFER_FULL))
+ return;
+
+ mutex_lock(&drv_info->rx_lock);
+
+ msg = drv_info->rx_buffer;
+ buf = kmemdup((void *)msg + msg->offset, msg->size, GFP_KERNEL);
+ if (!buf) {
+ mutex_unlock(&drv_info->rx_lock);
+ return;
+ }
+
+ target = SENDER_ID(msg->send_recv_id);
+ if (msg->offset >= sizeof(*msg))
+ uuid_copy(&uuid, &msg->uuid);
+ else
+ uuid_copy(&uuid, &uuid_null);
+
+ mutex_unlock(&drv_info->rx_lock);
+
+ ffa_rx_release();
+
+ mutex_lock(&drv_info->notify_lock);
+ cb_info = notifier_hnode_get_by_vmid_uuid(notify_id, target, &uuid);
+ mutex_unlock(&drv_info->notify_lock);
+
+ if (cb_info && cb_info->fwk_cb)
+ cb_info->fwk_cb(notify_id, cb_info->cb_data, buf);
+ kfree(buf);
+}
+
+static void notif_get_and_handle(void *cb_data)
{
int rc;
- struct ffa_notify_bitmaps bitmaps;
+ u32 flags;
+ struct ffa_drv_info *info = cb_data;
+ struct ffa_notify_bitmaps bitmaps = { 0 };
+
+ if (info->vm_id == 0) /* Non secure physical instance */
+ flags = FFA_BITMAP_SECURE_ENABLE_MASK;
+ else
+ flags = FFA_BITMAP_ALL_ENABLE_MASK;
- rc = ffa_notification_get(SECURE_PARTITION_BITMAP |
- SPM_FRAMEWORK_BITMAP, &bitmaps);
+ rc = ffa_notification_get(flags, &bitmaps);
if (rc) {
pr_err("Failed to retrieve notifications with %d!\n", rc);
return;
}
+ handle_fwk_notif_callbacks(SPM_FRAMEWORK_BITMAP(bitmaps.arch_map));
+ handle_fwk_notif_callbacks(NS_HYP_FRAMEWORK_BITMAP(bitmaps.arch_map));
handle_notif_callbacks(bitmaps.vm_map, NON_SECURE_VM);
handle_notif_callbacks(bitmaps.sp_map, SECURE_PARTITION);
- handle_notif_callbacks(bitmaps.arch_map, FRAMEWORK);
}
static void
@@ -1329,6 +1524,8 @@ static const struct ffa_notifier_ops ffa_drv_notifier_ops = {
.sched_recv_cb_unregister = ffa_sched_recv_cb_unregister,
.notify_request = ffa_notify_request,
.notify_relinquish = ffa_notify_relinquish,
+ .fwk_notify_request = ffa_fwk_notify_request,
+ .fwk_notify_relinquish = ffa_fwk_notify_relinquish,
.notify_send = ffa_notify_send,
};
@@ -1384,11 +1581,110 @@ static struct notifier_block ffa_bus_nb = {
.notifier_call = ffa_bus_notifier,
};
+static int ffa_xa_add_partition_info(struct ffa_device *dev)
+{
+ struct ffa_dev_part_info *info;
+ struct list_head *head, *phead;
+ int ret = -ENOMEM;
+
+ phead = xa_load(&drv_info->partition_info, dev->vm_id);
+ if (phead) {
+ head = phead;
+ list_for_each_entry(info, head, node) {
+ if (info->dev == dev) {
+ pr_err("%s: duplicate dev %p part ID 0x%x\n",
+ __func__, dev, dev->vm_id);
+ return -EEXIST;
+ }
+ }
+ }
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return ret;
+
+ rwlock_init(&info->rw_lock);
+ info->dev = dev;
+
+ if (!phead) {
+ phead = kzalloc(sizeof(*phead), GFP_KERNEL);
+ if (!phead)
+ goto free_out;
+
+ INIT_LIST_HEAD(phead);
+
+ ret = xa_insert(&drv_info->partition_info, dev->vm_id, phead,
+ GFP_KERNEL);
+ if (ret) {
+ pr_err("%s: failed to save part ID 0x%x Ret:%d\n",
+ __func__, dev->vm_id, ret);
+ goto free_out;
+ }
+ }
+ list_add(&info->node, phead);
+ return 0;
+
+free_out:
+ kfree(phead);
+ kfree(info);
+ return ret;
+}
+
+static int ffa_setup_host_partition(int vm_id)
+{
+ struct ffa_partition_info buf = { 0 };
+ struct ffa_device *ffa_dev;
+ int ret;
+
+ buf.id = vm_id;
+ ffa_dev = ffa_device_register(&buf, &ffa_drv_ops);
+ if (!ffa_dev) {
+ pr_err("%s: failed to register host partition ID 0x%x\n",
+ __func__, vm_id);
+ return -EINVAL;
+ }
+
+ ret = ffa_xa_add_partition_info(ffa_dev);
+ if (ret)
+ return ret;
+
+ if (ffa_notifications_disabled())
+ return 0;
+
+ ret = ffa_sched_recv_cb_update(ffa_dev, ffa_self_notif_handle,
+ drv_info, true);
+ if (ret)
+ pr_info("Failed to register driver sched callback %d\n", ret);
+
+ return ret;
+}
+
+static void ffa_partitions_cleanup(void)
+{
+ struct list_head *phead;
+ unsigned long idx;
+
+ /* Clean up/free all registered devices */
+ ffa_devices_unregister();
+
+ xa_for_each(&drv_info->partition_info, idx, phead) {
+ struct ffa_dev_part_info *info, *tmp;
+
+ xa_erase(&drv_info->partition_info, idx);
+ list_for_each_entry_safe(info, tmp, phead, node) {
+ list_del(&info->node);
+ kfree(info);
+ }
+ kfree(phead);
+ }
+
+ xa_destroy(&drv_info->partition_info);
+}
+
static int ffa_setup_partitions(void)
{
int count, idx, ret;
struct ffa_device *ffa_dev;
- struct ffa_dev_part_info *info;
struct ffa_partition_info *pbuf, *tpbuf;
if (drv_info->version == FFA_VERSION_1_0) {
@@ -1422,59 +1718,30 @@ static int ffa_setup_partitions(void)
!(tpbuf->properties & FFA_PARTITION_AARCH64_EXEC))
ffa_mode_32bit_set(ffa_dev);
- info = kzalloc(sizeof(*info), GFP_KERNEL);
- if (!info) {
+ if (ffa_xa_add_partition_info(ffa_dev)) {
ffa_device_unregister(ffa_dev);
continue;
}
- rwlock_init(&info->rw_lock);
- ret = xa_insert(&drv_info->partition_info, tpbuf->id,
- info, GFP_KERNEL);
- if (ret) {
- pr_err("%s: failed to save partition ID 0x%x - ret:%d\n",
- __func__, tpbuf->id, ret);
- ffa_device_unregister(ffa_dev);
- kfree(info);
- }
}
kfree(pbuf);
- /* Allocate for the host */
- info = kzalloc(sizeof(*info), GFP_KERNEL);
- if (!info) {
- /* Already registered devices are freed on bus_exit */
- ffa_partitions_cleanup();
- return -ENOMEM;
- }
+ /*
+ * Check if the host is already added as part of partition info
+ * No multiple UUID possible for the host, so just checking if
+ * there is an entry will suffice
+ */
+ if (xa_load(&drv_info->partition_info, drv_info->vm_id))
+ return 0;
- rwlock_init(&info->rw_lock);
- ret = xa_insert(&drv_info->partition_info, drv_info->vm_id,
- info, GFP_KERNEL);
- if (ret) {
- pr_err("%s: failed to save Host partition ID 0x%x - ret:%d. Abort.\n",
- __func__, drv_info->vm_id, ret);
- kfree(info);
- /* Already registered devices are freed on bus_exit */
+ /* Allocate for the host */
+ ret = ffa_setup_host_partition(drv_info->vm_id);
+ if (ret)
ffa_partitions_cleanup();
- }
return ret;
}
-static void ffa_partitions_cleanup(void)
-{
- struct ffa_dev_part_info *info;
- unsigned long idx;
-
- xa_for_each(&drv_info->partition_info, idx, info) {
- xa_erase(&drv_info->partition_info, idx);
- kfree(info);
- }
-
- xa_destroy(&drv_info->partition_info);
-}
-
/* FFA FEATURE IDs */
#define FFA_FEAT_NOTIFICATION_PENDING_INT (1)
#define FFA_FEAT_SCHEDULE_RECEIVER_INT (2)
@@ -1777,19 +2044,10 @@ static int __init ffa_init(void)
ffa_notifications_setup();
ret = ffa_setup_partitions();
- if (ret) {
- pr_err("failed to setup partitions\n");
- goto cleanup_notifs;
- }
-
- ret = ffa_sched_recv_cb_update(drv_info->vm_id, ffa_self_notif_handle,
- drv_info, true);
- if (ret)
- pr_info("Failed to register driver sched callback %d\n", ret);
-
- return 0;
+ if (!ret)
+ return ret;
-cleanup_notifs:
+ pr_err("failed to setup partitions\n");
ffa_notifications_cleanup();
free_pages:
if (drv_info->tx_buffer)
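
The FFA_PARTITION_INFO_GET_REGS handling above packs each partition's ID, execution-context count and properties into one 64-bit register and extracts them with GENMASK()/FIELD_GET(). A small self-contained worked example of that unpacking, with an invented sample value and a 64-bit build assumed (matching the driver):

/*
 * Worked example: pulling three fields out of one 64-bit register the way
 * the new PART_INFO_* masks in driver.c do. The sample value is made up;
 * a 64-bit build is assumed for GENMASK(63, 32).
 */
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/printk.h>
#include <linux/types.h>

#define DEMO_PART_INFO_ID_MASK		GENMASK(15, 0)
#define DEMO_PART_INFO_EXEC_CXT_MASK	GENMASK(31, 16)
#define DEMO_PART_INFO_PROPS_MASK	GENMASK(63, 32)

static void demo_unpack_partition_reg(void)
{
	u64 val = 0x0000000300020001ULL;
	u16 id, exec_ctxt;
	u32 properties;

	id         = FIELD_GET(DEMO_PART_INFO_ID_MASK, val);       /* 0x0001 */
	exec_ctxt  = FIELD_GET(DEMO_PART_INFO_EXEC_CXT_MASK, val); /* 0x0002 */
	properties = FIELD_GET(DEMO_PART_INFO_PROPS_MASK, val);    /* 0x3    */

	pr_info("id=0x%x exec_ctxt=%u properties=0x%x\n",
		id, exec_ctxt, properties);
}
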
diff --git a/drivers/firmware/arm_scmi/bus.c b/drivers/firmware/arm_scmi/bus.c
index a3386bf36de5..7af01664ce7e 100644
--- a/drivers/firmware/arm_scmi/bus.c
+++ b/drivers/firmware/arm_scmi/bus.c
@@ -17,6 +17,8 @@
#include "common.h"
+#define SCMI_UEVENT_MODALIAS_FMT "%s:%02x:%s"
+
BLOCKING_NOTIFIER_HEAD(scmi_requested_devices_nh);
EXPORT_SYMBOL_GPL(scmi_requested_devices_nh);
@@ -42,7 +44,7 @@ static atomic_t scmi_syspower_registered = ATOMIC_INIT(0);
* This helper let an SCMI driver request specific devices identified by the
* @id_table to be created for each active SCMI instance.
*
- * The requested device name MUST NOT be already existent for any protocol;
+ * The requested device name MUST NOT be already existent for this protocol;
* at first the freshly requested @id_table is annotated in the IDR table
* @scmi_requested_devices and then the requested device is advertised to any
* registered party via the @scmi_requested_devices_nh notification chain.
@@ -52,7 +54,6 @@ static atomic_t scmi_syspower_registered = ATOMIC_INIT(0);
static int scmi_protocol_device_request(const struct scmi_device_id *id_table)
{
int ret = 0;
- unsigned int id = 0;
struct list_head *head, *phead = NULL;
struct scmi_requested_dev *rdev;
@@ -67,19 +68,13 @@ static int scmi_protocol_device_request(const struct scmi_device_id *id_table)
}
/*
- * Search for the matching protocol rdev list and then search
- * of any existent equally named device...fails if any duplicate found.
+ * Find the matching protocol rdev list and then search of any
+ * existent equally named device...fails if any duplicate found.
*/
mutex_lock(&scmi_requested_devices_mtx);
- idr_for_each_entry(&scmi_requested_devices, head, id) {
- if (!phead) {
- /* A list found registered in the IDR is never empty */
- rdev = list_first_entry(head, struct scmi_requested_dev,
- node);
- if (rdev->id_table->protocol_id ==
- id_table->protocol_id)
- phead = head;
- }
+ phead = idr_find(&scmi_requested_devices, id_table->protocol_id);
+ if (phead) {
+ head = phead;
list_for_each_entry(rdev, head, node) {
if (!strcmp(rdev->id_table->name, id_table->name)) {
pr_err("Ignoring duplicate request [%d] %s\n",
@@ -283,11 +278,59 @@ static void scmi_dev_remove(struct device *dev)
scmi_drv->remove(scmi_dev);
}
+static int scmi_device_uevent(const struct device *dev, struct kobj_uevent_env *env)
+{
+ const struct scmi_device *scmi_dev = to_scmi_dev(dev);
+
+ return add_uevent_var(env, "MODALIAS=" SCMI_UEVENT_MODALIAS_FMT,
+ dev_name(&scmi_dev->dev), scmi_dev->protocol_id,
+ scmi_dev->name);
+}
+
+static ssize_t modalias_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scmi_device *scmi_dev = to_scmi_dev(dev);
+
+ return sysfs_emit(buf, SCMI_UEVENT_MODALIAS_FMT,
+ dev_name(&scmi_dev->dev), scmi_dev->protocol_id,
+ scmi_dev->name);
+}
+static DEVICE_ATTR_RO(modalias);
+
+static ssize_t protocol_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scmi_device *scmi_dev = to_scmi_dev(dev);
+
+ return sprintf(buf, "0x%02x\n", scmi_dev->protocol_id);
+}
+static DEVICE_ATTR_RO(protocol_id);
+
+static ssize_t name_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct scmi_device *scmi_dev = to_scmi_dev(dev);
+
+ return sprintf(buf, "%s\n", scmi_dev->name);
+}
+static DEVICE_ATTR_RO(name);
+
+static struct attribute *scmi_device_attributes_attrs[] = {
+ &dev_attr_protocol_id.attr,
+ &dev_attr_name.attr,
+ &dev_attr_modalias.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(scmi_device_attributes);
+
const struct bus_type scmi_bus_type = {
.name = "scmi_protocol",
.match = scmi_dev_match,
.probe = scmi_dev_probe,
.remove = scmi_dev_remove,
+ .uevent = scmi_device_uevent,
+ .dev_groups = scmi_device_attributes_groups,
};
EXPORT_SYMBOL_GPL(scmi_bus_type);
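
With the uevent callback and modalias attribute added above, udev can autoload SCMI protocol drivers from the scmi_device_id tables they already export. For context, the usual in-tree client-driver boilerplate that such MODALIAS strings match against looks roughly like the sketch below; the names are illustrative, and the PERF/"cpufreq" pairing mirrors drivers/cpufreq/scmi-cpufreq.c.

/*
 * Hedged example: typical SCMI client-driver boilerplate whose id_table
 * entries the new MODALIAS/uevent support keys off. Driver and symbol
 * names here are illustrative.
 */
#include <linux/scmi_protocol.h>
#include <linux/module.h>

static int demo_scmi_probe(struct scmi_device *sdev)
{
	return 0;
}

static const struct scmi_device_id demo_scmi_id_table[] = {
	{ SCMI_PROTOCOL_PERF, "cpufreq" },
	{ },
};
MODULE_DEVICE_TABLE(scmi, demo_scmi_id_table);

static struct scmi_driver demo_scmi_driver = {
	.name		= "demo-scmi-driver",
	.probe		= demo_scmi_probe,
	.id_table	= demo_scmi_id_table,
};
module_scmi_driver(demo_scmi_driver);
MODULE_LICENSE("GPL");
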
diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
index 60050da54bf2..1c75a4c9c371 100644
--- a/drivers/firmware/arm_scmi/driver.c
+++ b/drivers/firmware/arm_scmi/driver.c
@@ -1997,17 +1997,7 @@ static void scmi_common_fastchannel_db_ring(struct scmi_fc_db_info *db)
else if (db->width == 4)
SCMI_PROTO_FC_RING_DB(32);
else /* db->width == 8 */
-#ifdef CONFIG_64BIT
SCMI_PROTO_FC_RING_DB(64);
-#else
- {
- u64 val = 0;
-
- if (db->mask)
- val = ioread64_hi_lo(db->addr) & db->mask;
- iowrite64_hi_lo(db->set | val, db->addr);
- }
-#endif
}
/**
diff --git a/drivers/firmware/cirrus/cs_dsp.c b/drivers/firmware/cirrus/cs_dsp.c
index 42433c19eb30..560724ce21aa 100644
--- a/drivers/firmware/cirrus/cs_dsp.c
+++ b/drivers/firmware/cirrus/cs_dsp.c
@@ -1631,6 +1631,7 @@ static int cs_dsp_load(struct cs_dsp *dsp, const struct firmware *firmware,
cs_dsp_debugfs_save_wmfwname(dsp, file);
+ ret = 0;
out_fw:
cs_dsp_buf_free(&buf_list);
@@ -2338,6 +2339,7 @@ static int cs_dsp_load_coeff(struct cs_dsp *dsp, const struct firmware *firmware
cs_dsp_debugfs_save_binname(dsp, file);
+ ret = 0;
out_fw:
cs_dsp_buf_free(&buf_list);
diff --git a/drivers/firmware/cirrus/test/cs_dsp_mock_mem_maps.c b/drivers/firmware/cirrus/test/cs_dsp_mock_mem_maps.c
index 161272e47bda..73412bcef50c 100644
--- a/drivers/firmware/cirrus/test/cs_dsp_mock_mem_maps.c
+++ b/drivers/firmware/cirrus/test/cs_dsp_mock_mem_maps.c
@@ -462,36 +462,6 @@ unsigned int cs_dsp_mock_xm_header_get_alg_base_in_words(struct cs_dsp_test *pri
EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_xm_header_get_alg_base_in_words, "FW_CS_DSP_KUNIT_TEST_UTILS");
/**
- * cs_dsp_mock_xm_header_get_fw_version_from_regmap() - Firmware version.
- *
- * @priv: Pointer to struct cs_dsp_test.
- *
- * Return: Firmware version word value.
- */
-unsigned int cs_dsp_mock_xm_header_get_fw_version_from_regmap(struct cs_dsp_test *priv)
-{
- unsigned int xm = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_XM);
- union {
- struct wmfw_id_hdr adsp2;
- struct wmfw_v3_id_hdr halo;
- } hdr;
-
- switch (priv->dsp->type) {
- case WMFW_ADSP2:
- regmap_raw_read(priv->dsp->regmap, xm, &hdr.adsp2, sizeof(hdr.adsp2));
- return be32_to_cpu(hdr.adsp2.ver);
- case WMFW_HALO:
- regmap_raw_read(priv->dsp->regmap, xm, &hdr.halo, sizeof(hdr.halo));
- return be32_to_cpu(hdr.halo.ver);
- default:
- KUNIT_FAIL(priv->test, NULL);
- return 0;
- }
-}
-EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_xm_header_get_fw_version_from_regmap,
- "FW_CS_DSP_KUNIT_TEST_UTILS");
-
-/**
* cs_dsp_mock_xm_header_get_fw_version() - Firmware version.
*
* @header: Pointer to struct cs_dsp_mock_xm_header.
diff --git a/drivers/firmware/cirrus/test/cs_dsp_test_bin.c b/drivers/firmware/cirrus/test/cs_dsp_test_bin.c
index 1e161bbc5b4a..163b7faecff4 100644
--- a/drivers/firmware/cirrus/test/cs_dsp_test_bin.c
+++ b/drivers/firmware/cirrus/test/cs_dsp_test_bin.c
@@ -2198,7 +2198,7 @@ static int cs_dsp_bin_test_common_init(struct kunit *test, struct cs_dsp *dsp)
priv->local->bin_builder =
cs_dsp_mock_bin_init(priv, 1,
- cs_dsp_mock_xm_header_get_fw_version_from_regmap(priv));
+ cs_dsp_mock_xm_header_get_fw_version(xm_hdr));
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv->local->bin_builder);
/* We must provide a dummy wmfw to load */
diff --git a/drivers/firmware/cirrus/test/cs_dsp_test_bin_error.c b/drivers/firmware/cirrus/test/cs_dsp_test_bin_error.c
index 5dcf62f19faf..a7ec956d2724 100644
--- a/drivers/firmware/cirrus/test/cs_dsp_test_bin_error.c
+++ b/drivers/firmware/cirrus/test/cs_dsp_test_bin_error.c
@@ -451,7 +451,7 @@ static int cs_dsp_bin_err_test_common_init(struct kunit *test, struct cs_dsp *ds
local->bin_builder =
cs_dsp_mock_bin_init(priv, 1,
- cs_dsp_mock_xm_header_get_fw_version_from_regmap(priv));
+ cs_dsp_mock_xm_header_get_fw_version(local->xm_header));
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, local->bin_builder);
/* Init cs_dsp */
@@ -534,11 +534,6 @@ static int cs_dsp_bin_err_test_adsp2_16bit_init(struct kunit *test)
return cs_dsp_bin_err_test_common_init(test, dsp, 1);
}
-static struct kunit_case cs_dsp_bin_err_test_cases_halo[] = {
-
- { } /* terminator */
-};
-
static void cs_dsp_bin_err_block_types_desc(const struct cs_dsp_bin_test_param *param,
char *desc)
{
@@ -560,7 +555,7 @@ KUNIT_ARRAY_PARAM(bin_test_block_types,
bin_test_block_types_cases,
cs_dsp_bin_err_block_types_desc);
-static struct kunit_case cs_dsp_bin_err_test_cases_adsp2[] = {
+static struct kunit_case cs_dsp_bin_err_test_cases[] = {
KUNIT_CASE(bin_load_with_unknown_blocks),
KUNIT_CASE(bin_err_wrong_magic),
KUNIT_CASE(bin_err_too_short_for_header),
@@ -578,21 +573,21 @@ static struct kunit_suite cs_dsp_bin_err_test_halo = {
.name = "cs_dsp_bin_err_halo",
.init = cs_dsp_bin_err_test_halo_init,
.exit = cs_dsp_bin_err_test_exit,
- .test_cases = cs_dsp_bin_err_test_cases_halo,
+ .test_cases = cs_dsp_bin_err_test_cases,
};
static struct kunit_suite cs_dsp_bin_err_test_adsp2_32bit = {
.name = "cs_dsp_bin_err_adsp2_32bit",
.init = cs_dsp_bin_err_test_adsp2_32bit_init,
.exit = cs_dsp_bin_err_test_exit,
- .test_cases = cs_dsp_bin_err_test_cases_adsp2,
+ .test_cases = cs_dsp_bin_err_test_cases,
};
static struct kunit_suite cs_dsp_bin_err_test_adsp2_16bit = {
.name = "cs_dsp_bin_err_adsp2_16bit",
.init = cs_dsp_bin_err_test_adsp2_16bit_init,
.exit = cs_dsp_bin_err_test_exit,
- .test_cases = cs_dsp_bin_err_test_cases_adsp2,
+ .test_cases = cs_dsp_bin_err_test_cases,
};
kunit_test_suites(&cs_dsp_bin_err_test_halo,
diff --git a/drivers/firmware/cirrus/test/cs_dsp_test_control_parse.c b/drivers/firmware/cirrus/test/cs_dsp_test_control_parse.c
index cb90964740ea..942ba1af5e7c 100644
--- a/drivers/firmware/cirrus/test/cs_dsp_test_control_parse.c
+++ b/drivers/firmware/cirrus/test/cs_dsp_test_control_parse.c
@@ -73,6 +73,18 @@ static const struct cs_dsp_mock_coeff_def mock_coeff_template = {
.length_bytes = 4,
};
+static char *cs_dsp_ctl_alloc_test_string(struct kunit *test, char c, size_t len)
+{
+ char *str;
+
+ str = kunit_kmalloc(test, len + 1, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, str);
+ memset(str, c, len);
+ str[len] = '\0';
+
+ return str;
+}
+
/* Algorithm info block without controls should load */
static void cs_dsp_ctl_parse_no_coeffs(struct kunit *test)
{
@@ -160,12 +172,8 @@ static void cs_dsp_ctl_parse_max_v1_name(struct kunit *test)
struct cs_dsp_mock_coeff_def def = mock_coeff_template;
struct cs_dsp_coeff_ctl *ctl;
struct firmware *wmfw;
- char *name;
- name = kunit_kzalloc(test, 256, GFP_KERNEL);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, name);
- memset(name, 'A', 255);
- def.fullname = name;
+ def.fullname = cs_dsp_ctl_alloc_test_string(test, 'A', 255);
cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
cs_dsp_ctl_parse_test_algs[0].id,
@@ -252,14 +260,9 @@ static void cs_dsp_ctl_parse_max_short_name(struct kunit *test)
struct cs_dsp_test_local *local = priv->local;
struct cs_dsp_mock_coeff_def def = mock_coeff_template;
struct cs_dsp_coeff_ctl *ctl;
- char *name;
struct firmware *wmfw;
- name = kunit_kmalloc(test, 255, GFP_KERNEL);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, name);
- memset(name, 'A', 255);
-
- def.shortname = name;
+ def.shortname = cs_dsp_ctl_alloc_test_string(test, 'A', 255);
cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
cs_dsp_ctl_parse_test_algs[0].id,
@@ -273,7 +276,7 @@ static void cs_dsp_ctl_parse_max_short_name(struct kunit *test)
ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
KUNIT_ASSERT_NOT_NULL(test, ctl);
KUNIT_EXPECT_EQ(test, ctl->subname_len, 255);
- KUNIT_EXPECT_MEMEQ(test, ctl->subname, name, ctl->subname_len);
+ KUNIT_EXPECT_MEMEQ(test, ctl->subname, def.shortname, ctl->subname_len);
KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
KUNIT_EXPECT_EQ(test, ctl->type, def.type);
KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
@@ -323,12 +326,8 @@ static void cs_dsp_ctl_parse_with_max_fullname(struct kunit *test)
struct cs_dsp_mock_coeff_def def = mock_coeff_template;
struct cs_dsp_coeff_ctl *ctl;
struct firmware *wmfw;
- char *fullname;
- fullname = kunit_kmalloc(test, 255, GFP_KERNEL);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, fullname);
- memset(fullname, 'A', 255);
- def.fullname = fullname;
+ def.fullname = cs_dsp_ctl_alloc_test_string(test, 'A', 255);
cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
cs_dsp_ctl_parse_test_algs[0].id,
@@ -392,12 +391,8 @@ static void cs_dsp_ctl_parse_with_max_description(struct kunit *test)
struct cs_dsp_mock_coeff_def def = mock_coeff_template;
struct cs_dsp_coeff_ctl *ctl;
struct firmware *wmfw;
- char *description;
- description = kunit_kmalloc(test, 65535, GFP_KERNEL);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, description);
- memset(description, 'A', 65535);
- def.description = description;
+ def.description = cs_dsp_ctl_alloc_test_string(test, 'A', 65535);
cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
cs_dsp_ctl_parse_test_algs[0].id,
@@ -429,17 +424,9 @@ static void cs_dsp_ctl_parse_with_max_fullname_and_description(struct kunit *tes
struct cs_dsp_mock_coeff_def def = mock_coeff_template;
struct cs_dsp_coeff_ctl *ctl;
struct firmware *wmfw;
- char *fullname, *description;
-
- fullname = kunit_kmalloc(test, 255, GFP_KERNEL);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, fullname);
- memset(fullname, 'A', 255);
- def.fullname = fullname;
- description = kunit_kmalloc(test, 65535, GFP_KERNEL);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, description);
- memset(description, 'A', 65535);
- def.description = description;
+ def.fullname = cs_dsp_ctl_alloc_test_string(test, 'A', 255);
+ def.description = cs_dsp_ctl_alloc_test_string(test, 'A', 65535);
cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
cs_dsp_ctl_parse_test_algs[0].id,
diff --git a/drivers/firmware/dmi-sysfs.c b/drivers/firmware/dmi-sysfs.c
index 8d91997036e4..9cc963b2edc0 100644
--- a/drivers/firmware/dmi-sysfs.c
+++ b/drivers/firmware/dmi-sysfs.c
@@ -431,9 +431,9 @@ static ssize_t dmi_sel_raw_read_helper(struct dmi_sysfs_entry *entry,
}
}
-static ssize_t dmi_sel_raw_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buf, loff_t pos, size_t count)
+static ssize_t raw_event_log_read(struct file *filp, struct kobject *kobj,
+ const struct bin_attribute *bin_attr,
+ char *buf, loff_t pos, size_t count)
{
struct dmi_sysfs_entry *entry = to_entry(kobj->parent);
struct dmi_read_state state = {
@@ -445,10 +445,7 @@ static ssize_t dmi_sel_raw_read(struct file *filp, struct kobject *kobj,
return find_dmi_entry(entry, dmi_sel_raw_read_helper, &state);
}
-static struct bin_attribute dmi_sel_raw_attr = {
- .attr = {.name = "raw_event_log", .mode = 0400},
- .read = dmi_sel_raw_read,
-};
+static const BIN_ATTR_ADMIN_RO(raw_event_log, 0);
static int dmi_system_event_log(struct dmi_sysfs_entry *entry)
{
@@ -464,7 +461,7 @@ static int dmi_system_event_log(struct dmi_sysfs_entry *entry)
if (ret)
goto out_free;
- ret = sysfs_create_bin_file(entry->child, &dmi_sel_raw_attr);
+ ret = sysfs_create_bin_file(entry->child, &bin_attr_raw_event_log);
if (ret)
goto out_del;
@@ -537,10 +534,10 @@ static ssize_t dmi_entry_raw_read_helper(struct dmi_sysfs_entry *entry,
&state->pos, dh, entry_length);
}
-static ssize_t dmi_entry_raw_read(struct file *filp,
- struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buf, loff_t pos, size_t count)
+static ssize_t raw_read(struct file *filp,
+ struct kobject *kobj,
+ const struct bin_attribute *bin_attr,
+ char *buf, loff_t pos, size_t count)
{
struct dmi_sysfs_entry *entry = to_entry(kobj);
struct dmi_read_state state = {
@@ -552,10 +549,7 @@ static ssize_t dmi_entry_raw_read(struct file *filp,
return find_dmi_entry(entry, dmi_entry_raw_read_helper, &state);
}
-static const struct bin_attribute dmi_entry_raw_attr = {
- .attr = {.name = "raw", .mode = 0400},
- .read = dmi_entry_raw_read,
-};
+static const BIN_ATTR_ADMIN_RO(raw, 0);
static void dmi_sysfs_entry_release(struct kobject *kobj)
{
@@ -630,7 +624,7 @@ static void __init dmi_sysfs_register_handle(const struct dmi_header *dh,
goto out_err;
/* Create the raw binary file to access the entry */
- *ret = sysfs_create_bin_file(&entry->kobj, &dmi_entry_raw_attr);
+ *ret = sysfs_create_bin_file(&entry->kobj, &bin_attr_raw);
if (*ret)
goto out_err;
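
The conversion above relies on the BIN_ATTR_ADMIN_RO() naming convention: the read callback must be named <name>_read and the macro defines a variable named bin_attr_<name>, which is why the callbacks were renamed to raw_event_log_read and raw_read. Roughly, as an approximate expansion of the helper in include/linux/sysfs.h, the raw_event_log declaration is equivalent to:

/*
 * Approximate expansion of:
 *	static const BIN_ATTR_ADMIN_RO(raw_event_log, 0);
 * per __BIN_ATTR_ADMIN_RO() in include/linux/sysfs.h; treat the exact
 * field layout as illustrative.
 */
static const struct bin_attribute bin_attr_raw_event_log = {
	.attr	= { .name = "raw_event_log", .mode = 0400 },
	.read	= raw_event_log_read,
	.size	= 0,
};
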
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index fde0656481cc..70d39adf50dc 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -761,8 +761,8 @@ static void __init dmi_scan_machine(void)
pr_info("DMI not present or invalid.\n");
}
-static BIN_ATTR_SIMPLE_ADMIN_RO(smbios_entry_point);
-static BIN_ATTR_SIMPLE_ADMIN_RO(DMI);
+static __ro_after_init BIN_ATTR_SIMPLE_ADMIN_RO(smbios_entry_point);
+static __ro_after_init BIN_ATTR_SIMPLE_ADMIN_RO(DMI);
static int __init dmi_init(void)
{
diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c
index b69e68ef3f02..928409199a1a 100644
--- a/drivers/firmware/efi/cper.c
+++ b/drivers/firmware/efi/cper.c
@@ -24,7 +24,7 @@
#include <linux/bcd.h>
#include <acpi/ghes.h>
#include <ras/ras_event.h>
-#include "cper_cxl.h"
+#include <cxl/event.h>
/*
* CPER record ID need to be unique even after reboot, because record
@@ -624,11 +624,11 @@ cper_estatus_print_section(const char *pfx, struct acpi_hest_generic_data *gdata
else
goto err_section_too_small;
} else if (guid_equal(sec_type, &CPER_SEC_CXL_PROT_ERR)) {
- struct cper_sec_prot_err *prot_err = acpi_hest_get_payload(gdata);
+ struct cxl_cper_sec_prot_err *prot_err = acpi_hest_get_payload(gdata);
printk("%ssection_type: CXL Protocol Error\n", newpfx);
if (gdata->error_data_length >= sizeof(*prot_err))
- cper_print_prot_err(newpfx, prot_err);
+ cxl_cper_print_prot_err(newpfx, prot_err);
else
goto err_section_too_small;
} else {
diff --git a/drivers/firmware/efi/cper_cxl.c b/drivers/firmware/efi/cper_cxl.c
index a55771b99a97..8a7667faf953 100644
--- a/drivers/firmware/efi/cper_cxl.c
+++ b/drivers/firmware/efi/cper_cxl.c
@@ -8,26 +8,7 @@
*/
#include <linux/cper.h>
-#include "cper_cxl.h"
-
-#define PROT_ERR_VALID_AGENT_TYPE BIT_ULL(0)
-#define PROT_ERR_VALID_AGENT_ADDRESS BIT_ULL(1)
-#define PROT_ERR_VALID_DEVICE_ID BIT_ULL(2)
-#define PROT_ERR_VALID_SERIAL_NUMBER BIT_ULL(3)
-#define PROT_ERR_VALID_CAPABILITY BIT_ULL(4)
-#define PROT_ERR_VALID_DVSEC BIT_ULL(5)
-#define PROT_ERR_VALID_ERROR_LOG BIT_ULL(6)
-
-/* CXL RAS Capability Structure, CXL v3.0 sec 8.2.4.16 */
-struct cxl_ras_capability_regs {
- u32 uncor_status;
- u32 uncor_mask;
- u32 uncor_severity;
- u32 cor_status;
- u32 cor_mask;
- u32 cap_control;
- u32 header_log[16];
-};
+#include <cxl/event.h>
static const char * const prot_err_agent_type_strs[] = {
"Restricted CXL Device",
@@ -40,22 +21,8 @@ static const char * const prot_err_agent_type_strs[] = {
"CXL Upstream Switch Port",
};
-/*
- * The layout of the enumeration and the values matches CXL Agent Type
- * field in the UEFI 2.10 Section N.2.13,
- */
-enum {
- RCD, /* Restricted CXL Device */
- RCH_DP, /* Restricted CXL Host Downstream Port */
- DEVICE, /* CXL Device */
- LD, /* CXL Logical Device */
- FMLD, /* CXL Fabric Manager managed Logical Device */
- RP, /* CXL Root Port */
- DSP, /* CXL Downstream Switch Port */
- USP, /* CXL Upstream Switch Port */
-};
-
-void cper_print_prot_err(const char *pfx, const struct cper_sec_prot_err *prot_err)
+void cxl_cper_print_prot_err(const char *pfx,
+ const struct cxl_cper_sec_prot_err *prot_err)
{
if (prot_err->valid_bits & PROT_ERR_VALID_AGENT_TYPE)
pr_info("%s agent_type: %d, %s\n", pfx, prot_err->agent_type,
diff --git a/drivers/firmware/efi/cper_cxl.h b/drivers/firmware/efi/cper_cxl.h
deleted file mode 100644
index 86bfcf7909ec..000000000000
--- a/drivers/firmware/efi/cper_cxl.h
+++ /dev/null
@@ -1,66 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * UEFI Common Platform Error Record (CPER) support for CXL Section.
- *
- * Copyright (C) 2022 Advanced Micro Devices, Inc.
- *
- * Author: Smita Koralahalli <Smita.KoralahalliChannabasappa@amd.com>
- */
-
-#ifndef LINUX_CPER_CXL_H
-#define LINUX_CPER_CXL_H
-
-/* CXL Protocol Error Section */
-#define CPER_SEC_CXL_PROT_ERR \
- GUID_INIT(0x80B9EFB4, 0x52B5, 0x4DE3, 0xA7, 0x77, 0x68, 0x78, \
- 0x4B, 0x77, 0x10, 0x48)
-
-#pragma pack(1)
-
-/* Compute Express Link Protocol Error Section, UEFI v2.10 sec N.2.13 */
-struct cper_sec_prot_err {
- u64 valid_bits;
- u8 agent_type;
- u8 reserved[7];
-
- /*
- * Except for RCH Downstream Port, all the remaining CXL Agent
- * types are uniquely identified by the PCIe compatible SBDF number.
- */
- union {
- u64 rcrb_base_addr;
- struct {
- u8 function;
- u8 device;
- u8 bus;
- u16 segment;
- u8 reserved_1[3];
- };
- } agent_addr;
-
- struct {
- u16 vendor_id;
- u16 device_id;
- u16 subsystem_vendor_id;
- u16 subsystem_id;
- u8 class_code[2];
- u16 slot;
- u8 reserved_1[4];
- } device_id;
-
- struct {
- u32 lower_dw;
- u32 upper_dw;
- } dev_serial_num;
-
- u8 capability[60];
- u16 dvsec_len;
- u16 err_len;
- u8 reserved_2[4];
-};
-
-#pragma pack()
-
-void cper_print_prot_err(const char *pfx, const struct cper_sec_prot_err *prot_err);
-
-#endif //__CPER_CXL_
diff --git a/drivers/firmware/efi/efibc.c b/drivers/firmware/efi/efibc.c
index 4f9fb086eab7..0a7c764dcc61 100644
--- a/drivers/firmware/efi/efibc.c
+++ b/drivers/firmware/efi/efibc.c
@@ -47,7 +47,7 @@ static int efibc_reboot_notifier_call(struct notifier_block *notifier,
if (ret || !data)
return NOTIFY_DONE;
- wdata = kmalloc(MAX_DATA_LEN * sizeof(efi_char16_t), GFP_KERNEL);
+ wdata = kmalloc_array(MAX_DATA_LEN, sizeof(efi_char16_t), GFP_KERNEL);
if (!wdata)
return NOTIFY_DONE;
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
index 1141cd06011f..d23a1b9fed75 100644
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -62,6 +62,8 @@ KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_LTO), $(KBUILD_CFLAGS))
# `-fdata-sections` flag from KBUILD_CFLAGS_KERNEL
KBUILD_CFLAGS_KERNEL := $(filter-out -fdata-sections, $(KBUILD_CFLAGS_KERNEL))
+KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
+
lib-y := efi-stub-helper.o gop.o secureboot.o tpm.o \
file.o mem.o random.o randomalloc.o pci.o \
skip_spaces.o lib-cmdline.o lib-ctype.o \
@@ -83,13 +85,19 @@ lib-$(CONFIG_EFI_GENERIC_STUB) += efi-stub.o string.o intrinsics.o systable.o \
lib-$(CONFIG_ARM) += arm32-stub.o
lib-$(CONFIG_ARM64) += kaslr.o arm64.o arm64-stub.o smbios.o
lib-$(CONFIG_X86) += x86-stub.o smbios.o
+lib-$(CONFIG_EFI_MIXED) += x86-mixed.o
lib-$(CONFIG_X86_64) += x86-5lvl.o
lib-$(CONFIG_RISCV) += kaslr.o riscv.o riscv-stub.o
lib-$(CONFIG_LOONGARCH) += loongarch.o loongarch-stub.o
CFLAGS_arm32-stub.o := -DTEXT_OFFSET=$(TEXT_OFFSET)
-zboot-obj-$(CONFIG_RISCV) := lib-clz_ctz.o lib-ashldi3.o
+zboot-obj-y := zboot-decompress-gzip.o
+CFLAGS_zboot-decompress-gzip.o += -I$(srctree)/lib/zlib_inflate
+zboot-obj-$(CONFIG_KERNEL_ZSTD) := zboot-decompress-zstd.o lib-xxhash.o
+CFLAGS_zboot-decompress-zstd.o += -I$(srctree)/lib/zstd
+
+zboot-obj-$(CONFIG_RISCV) += lib-clz_ctz.o lib-ashldi3.o
lib-$(CONFIG_EFI_ZBOOT) += zboot.o $(zboot-obj-y)
lib-$(CONFIG_UNACCEPTED_MEMORY) += unaccepted_memory.o bitmap.o find.o
diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h
index d96d4494070d..f5ba032863a9 100644
--- a/drivers/firmware/efi/libstub/efistub.h
+++ b/drivers/firmware/efi/libstub/efistub.h
@@ -1234,4 +1234,7 @@ void process_unaccepted_memory(u64 start, u64 end);
void accept_memory(phys_addr_t start, unsigned long size);
void arch_accept_memory(phys_addr_t start, phys_addr_t end);
+efi_status_t efi_zboot_decompress_init(unsigned long *alloc_size);
+efi_status_t efi_zboot_decompress(u8 *out, unsigned long outlen);
+
#endif
diff --git a/drivers/firmware/efi/libstub/intrinsics.c b/drivers/firmware/efi/libstub/intrinsics.c
index 965e734f6f98..418cd2e6dccc 100644
--- a/drivers/firmware/efi/libstub/intrinsics.c
+++ b/drivers/firmware/efi/libstub/intrinsics.c
@@ -15,8 +15,31 @@ void *__memmove(void *__dest, const void *__src, size_t count) __alias(memmove);
void *__memset(void *s, int c, size_t count) __alias(memset);
#endif
+static void *efistub_memmove(u8 *dst, const u8 *src, size_t len)
+{
+ if (src > dst || dst >= (src + len))
+ for (size_t i = 0; i < len; i++)
+ dst[i] = src[i];
+ else
+ for (ssize_t i = len - 1; i >= 0; i--)
+ dst[i] = src[i];
+
+ return dst;
+}
+
+static void *efistub_memset(void *dst, int c, size_t len)
+{
+ for (u8 *d = dst; len--; d++)
+ *d = c;
+
+ return dst;
+}
+
void *memcpy(void *dst, const void *src, size_t len)
{
+ if (efi_table_attr(efi_system_table, boottime) == NULL)
+ return efistub_memmove(dst, src, len);
+
efi_bs_call(copy_mem, dst, src, len);
return dst;
}
@@ -25,6 +48,9 @@ extern void *memmove(void *dst, const void *src, size_t len) __alias(memcpy);
void *memset(void *dst, int c, size_t len)
{
+ if (efi_table_attr(efi_system_table, boottime) == NULL)
+ return efistub_memset(dst, c, len);
+
efi_bs_call(set_mem, dst, len, c & U8_MAX);
return dst;
}
diff --git a/drivers/firmware/efi/libstub/randomalloc.c b/drivers/firmware/efi/libstub/randomalloc.c
index 5a732018be36..fd80b2f3233a 100644
--- a/drivers/firmware/efi/libstub/randomalloc.c
+++ b/drivers/firmware/efi/libstub/randomalloc.c
@@ -75,6 +75,10 @@ efi_status_t efi_random_alloc(unsigned long size,
if (align < EFI_ALLOC_ALIGN)
align = EFI_ALLOC_ALIGN;
+ /* Avoid address 0x0, as it can be mistaken for NULL */
+ if (alloc_min == 0)
+ alloc_min = align;
+
size = round_up(size, EFI_ALLOC_ALIGN);
/* count the suitable slots in each memory map entry */
diff --git a/drivers/firmware/efi/libstub/x86-mixed.S b/drivers/firmware/efi/libstub/x86-mixed.S
new file mode 100644
index 000000000000..e04ed99bc449
--- /dev/null
+++ b/drivers/firmware/efi/libstub/x86-mixed.S
@@ -0,0 +1,253 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2014, 2015 Intel Corporation; author Matt Fleming
+ *
+ * Early support for invoking 32-bit EFI services from a 64-bit kernel.
+ *
+ * Because this thunking occurs before ExitBootServices() we have to
+ * restore the firmware's 32-bit GDT and IDT before we make EFI service
+ * calls.
+ *
+ * On the plus side, we don't have to worry about mangling 64-bit
+ * addresses into 32-bits because we're executing with an identity
+ * mapped pagetable and haven't transitioned to 64-bit virtual addresses
+ * yet.
+ */
+
+#include <linux/linkage.h>
+#include <asm/desc_defs.h>
+#include <asm/msr.h>
+#include <asm/page_types.h>
+#include <asm/pgtable_types.h>
+#include <asm/processor-flags.h>
+#include <asm/segment.h>
+
+ .text
+ .code32
+#ifdef CONFIG_EFI_HANDOVER_PROTOCOL
+SYM_FUNC_START(efi32_stub_entry)
+ call 1f
+1: popl %ecx
+
+ /* Clear BSS */
+ xorl %eax, %eax
+ leal (_bss - 1b)(%ecx), %edi
+ leal (_ebss - 1b)(%ecx), %ecx
+ subl %edi, %ecx
+ shrl $2, %ecx
+ cld
+ rep stosl
+
+ add $0x4, %esp /* Discard return address */
+ movl 8(%esp), %ebx /* struct boot_params pointer */
+ jmp efi32_startup
+SYM_FUNC_END(efi32_stub_entry)
+#endif
+
+/*
+ * Called using a far call from __efi64_thunk() below, using the x86_64 SysV
+ * ABI (except for R8/R9 which are inaccessible to 32-bit code - EAX/EBX are
+ * used instead). EBP+16 points to the arguments passed via the stack.
+ *
+ * The first argument (EDI) is a pointer to the boot service or protocol, to
+ * which the remaining arguments are passed, each truncated to 32 bits.
+ */
+SYM_FUNC_START_LOCAL(efi_enter32)
+ /*
+ * Convert x86-64 SysV ABI params to i386 ABI
+ */
+ pushl 32(%ebp) /* Up to 3 args passed via the stack */
+ pushl 24(%ebp)
+ pushl 16(%ebp)
+ pushl %ebx /* R9 */
+ pushl %eax /* R8 */
+ pushl %ecx
+ pushl %edx
+ pushl %esi
+
+ /* Disable paging */
+ movl %cr0, %eax
+ btrl $X86_CR0_PG_BIT, %eax
+ movl %eax, %cr0
+
+ /* Disable long mode via EFER */
+ movl $MSR_EFER, %ecx
+ rdmsr
+ btrl $_EFER_LME, %eax
+ wrmsr
+
+ call *%edi
+
+ /* We must preserve return value */
+ movl %eax, %edi
+
+ call efi32_enable_long_mode
+
+ addl $32, %esp
+ movl %edi, %eax
+ lret
+SYM_FUNC_END(efi_enter32)
+
+ .code64
+SYM_FUNC_START(__efi64_thunk)
+ push %rbp
+ movl %esp, %ebp
+ push %rbx
+
+ /* Move args #5 and #6 into 32-bit accessible registers */
+ movl %r8d, %eax
+ movl %r9d, %ebx
+
+ lcalll *efi32_call(%rip)
+
+ pop %rbx
+ pop %rbp
+ RET
+SYM_FUNC_END(__efi64_thunk)
+
+ .code32
+SYM_FUNC_START_LOCAL(efi32_enable_long_mode)
+ movl %cr4, %eax
+ btsl $(X86_CR4_PAE_BIT), %eax
+ movl %eax, %cr4
+
+ movl $MSR_EFER, %ecx
+ rdmsr
+ btsl $_EFER_LME, %eax
+ wrmsr
+
+ /* Disable interrupts - the firmware's IDT does not work in long mode */
+ cli
+
+ /* Enable paging */
+ movl %cr0, %eax
+ btsl $X86_CR0_PG_BIT, %eax
+ movl %eax, %cr0
+ ret
+SYM_FUNC_END(efi32_enable_long_mode)
+
+/*
+ * This is the common EFI stub entry point for mixed mode. It sets up the GDT
+ * and page tables needed for 64-bit execution, after which it calls the
+ * common 64-bit EFI entrypoint efi_stub_entry().
+ *
+ * Arguments: 0(%esp) image handle
+ * 4(%esp) EFI system table pointer
+ * %ebx struct boot_params pointer (or NULL)
+ *
+ * Since this is the point of no return for ordinary execution, no registers
+ * are considered live except for the function parameters. [Note that the EFI
+ * stub may still exit and return to the firmware using the Exit() EFI boot
+ * service.]
+ */
+SYM_FUNC_START_LOCAL(efi32_startup)
+ movl %esp, %ebp
+
+ subl $8, %esp
+ sgdtl (%esp) /* Save GDT descriptor to the stack */
+ movl 2(%esp), %esi /* Existing GDT pointer */
+ movzwl (%esp), %ecx /* Existing GDT limit */
+ inc %ecx /* Existing GDT size */
+ andl $~7, %ecx /* Ensure size is multiple of 8 */
+
+ subl %ecx, %esp /* Allocate new GDT */
+ andl $~15, %esp /* Realign the stack */
+ movl %esp, %edi /* New GDT address */
+ leal 7(%ecx), %eax /* New GDT limit */
+ pushw %cx /* Push 64-bit CS (for LJMP below) */
+ pushl %edi /* Push new GDT address */
+ pushw %ax /* Push new GDT limit */
+
+ /* Copy GDT to the stack and add a 64-bit code segment at the end */
+ movl $GDT_ENTRY(DESC_CODE64, 0, 0xfffff) & 0xffffffff, (%edi,%ecx)
+ movl $GDT_ENTRY(DESC_CODE64, 0, 0xfffff) >> 32, 4(%edi,%ecx)
+ shrl $2, %ecx
+ cld
+ rep movsl /* Copy the firmware GDT */
+ lgdtl (%esp) /* Switch to the new GDT */
+
+ call 1f
+1: pop %edi
+
+ /* Record mixed mode entry */
+ movb $0x0, (efi_is64 - 1b)(%edi)
+
+ /* Set up indirect far call to re-enter 32-bit mode */
+ leal (efi32_call - 1b)(%edi), %eax
+ addl %eax, (%eax)
+ movw %cs, 4(%eax)
+
+ /* Disable paging */
+ movl %cr0, %eax
+ btrl $X86_CR0_PG_BIT, %eax
+ movl %eax, %cr0
+
+ /* Set up 1:1 mapping */
+ leal (pte - 1b)(%edi), %eax
+ movl $_PAGE_PRESENT | _PAGE_RW | _PAGE_PSE, %ecx
+ leal (_PAGE_PRESENT | _PAGE_RW)(%eax), %edx
+2: movl %ecx, (%eax)
+ addl $8, %eax
+ addl $PMD_SIZE, %ecx
+ jnc 2b
+
+ movl $PAGE_SIZE, %ecx
+ .irpc l, 0123
+ movl %edx, \l * 8(%eax)
+ addl %ecx, %edx
+ .endr
+ addl %ecx, %eax
+ movl %edx, (%eax)
+ movl %eax, %cr3
+
+ call efi32_enable_long_mode
+
+ /* Set up far jump to 64-bit mode (CS is already on the stack) */
+ leal (efi_stub_entry - 1b)(%edi), %eax
+ movl %eax, 2(%esp)
+
+ movl 0(%ebp), %edi
+ movl 4(%ebp), %esi
+ movl %ebx, %edx
+ ljmpl *2(%esp)
+SYM_FUNC_END(efi32_startup)
+
+/*
+ * efi_status_t efi32_pe_entry(efi_handle_t image_handle,
+ * efi_system_table_32_t *sys_table)
+ */
+SYM_FUNC_START(efi32_pe_entry)
+ pushl %ebx // save callee-save registers
+
+ /* Check whether the CPU supports long mode */
+ movl $0x80000001, %eax // assume extended info support
+ cpuid
+ btl $29, %edx // check long mode bit
+ jnc 1f
+ leal 8(%esp), %esp // preserve stack alignment
+ xor %ebx, %ebx // no struct boot_params pointer
+ jmp efi32_startup // only ESP and EBX remain live
+1: movl $0x80000003, %eax // EFI_UNSUPPORTED
+ popl %ebx
+ RET
+SYM_FUNC_END(efi32_pe_entry)
+
+#ifdef CONFIG_EFI_HANDOVER_PROTOCOL
+ .org efi32_stub_entry + 0x200
+ .code64
+SYM_FUNC_START_NOALIGN(efi64_stub_entry)
+ jmp efi_handover_entry
+SYM_FUNC_END(efi64_stub_entry)
+#endif
+
+ .data
+ .balign 8
+SYM_DATA_START_LOCAL(efi32_call)
+ .long efi_enter32 - .
+ .word 0x0
+SYM_DATA_END(efi32_call)
+SYM_DATA(efi_is64, .byte 1)
+
+ .bss
+ .balign PAGE_SIZE
+SYM_DATA_LOCAL(pte, .fill 6 * PAGE_SIZE, 1, 0)
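
One detail worth noting in the file above: the operand of the indirect far call "lcalll *efi32_call" is a 48-bit far pointer kept in .data. efi32_startup() turns the link-time relative offset stored there into an absolute address (the "addl %eax, (%eax)" sequence) and records the current %cs as the selector, which is what later lets __efi64_thunk() drop back into 32-bit protected mode with a single far call. A C view of that descriptor, purely for illustration (this struct is not part of the kernel sources):

#include <stdint.h>

/* Memory layout consumed by "lcalll *efi32_call": a 32-bit offset
 * followed by a 16-bit code-segment selector. */
struct far_ptr32 {
	uint32_t offset;	/* absolute address of efi_enter32 */
	uint16_t segment;	/* 32-bit code segment, i.e. %cs at entry */
} __attribute__((packed));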
diff --git a/drivers/firmware/efi/libstub/x86-stub.c b/drivers/firmware/efi/libstub/x86-stub.c
index 863910e9eefc..cafc90d4caaf 100644
--- a/drivers/firmware/efi/libstub/x86-stub.c
+++ b/drivers/firmware/efi/libstub/x86-stub.c
@@ -397,17 +397,13 @@ static void __noreturn efi_exit(efi_handle_t handle, efi_status_t status)
asm("hlt");
}
-void __noreturn efi_stub_entry(efi_handle_t handle,
- efi_system_table_t *sys_table_arg,
- struct boot_params *boot_params);
-
/*
* Because the x86 boot code expects to be passed a boot_params we
* need to create one ourselves (usually the bootloader would create
* one for us).
*/
-efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
- efi_system_table_t *sys_table_arg)
+static efi_status_t efi_allocate_bootparams(efi_handle_t handle,
+ struct boot_params **bp)
{
efi_guid_t proto = LOADED_IMAGE_PROTOCOL_GUID;
struct boot_params *boot_params;
@@ -416,21 +412,15 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
unsigned long alloc;
char *cmdline_ptr;
- efi_system_table = sys_table_arg;
-
- /* Check if we were booted by the EFI firmware */
- if (efi_system_table->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE)
- efi_exit(handle, EFI_INVALID_PARAMETER);
-
status = efi_bs_call(handle_protocol, handle, &proto, (void **)&image);
if (status != EFI_SUCCESS) {
efi_err("Failed to get handle for LOADED_IMAGE_PROTOCOL\n");
- efi_exit(handle, status);
+ return status;
}
status = efi_allocate_pages(PARAM_SIZE, &alloc, ULONG_MAX);
if (status != EFI_SUCCESS)
- efi_exit(handle, status);
+ return status;
boot_params = memset((void *)alloc, 0x0, PARAM_SIZE);
hdr = &boot_params->hdr;
@@ -446,14 +436,14 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
cmdline_ptr = efi_convert_cmdline(image);
if (!cmdline_ptr) {
efi_free(PARAM_SIZE, alloc);
- efi_exit(handle, EFI_OUT_OF_RESOURCES);
+ return EFI_OUT_OF_RESOURCES;
}
efi_set_u64_split((unsigned long)cmdline_ptr, &hdr->cmd_line_ptr,
&boot_params->ext_cmd_line_ptr);
- efi_stub_entry(handle, sys_table_arg, boot_params);
- /* not reached */
+ *bp = boot_params;
+ return EFI_SUCCESS;
}
static void add_e820ext(struct boot_params *params,
@@ -740,13 +730,16 @@ static efi_status_t parse_options(const char *cmdline)
return efi_parse_options(cmdline);
}
-static efi_status_t efi_decompress_kernel(unsigned long *kernel_entry)
+static efi_status_t efi_decompress_kernel(unsigned long *kernel_entry,
+ struct boot_params *boot_params)
{
unsigned long virt_addr = LOAD_PHYSICAL_ADDR;
unsigned long addr, alloc_size, entry;
efi_status_t status;
u32 seed[2] = {};
+ boot_params_ptr = boot_params;
+
/* determine the required size of the allocation */
alloc_size = ALIGN(max_t(unsigned long, output_len, kernel_total_size),
MIN_KERNEL_ALIGN);
@@ -777,7 +770,7 @@ static efi_status_t efi_decompress_kernel(unsigned long *kernel_entry)
seed[0] = 0;
}
- boot_params_ptr->hdr.loadflags |= KASLR_FLAG;
+ boot_params->hdr.loadflags |= KASLR_FLAG;
}
status = efi_random_alloc(alloc_size, CONFIG_PHYSICAL_ALIGN, &addr,
@@ -815,20 +808,27 @@ static void __noreturn enter_kernel(unsigned long kernel_addr,
void __noreturn efi_stub_entry(efi_handle_t handle,
efi_system_table_t *sys_table_arg,
struct boot_params *boot_params)
+
{
efi_guid_t guid = EFI_MEMORY_ATTRIBUTE_PROTOCOL_GUID;
- struct setup_header *hdr = &boot_params->hdr;
const struct linux_efi_initrd *initrd = NULL;
unsigned long kernel_entry;
+ struct setup_header *hdr;
efi_status_t status;
- boot_params_ptr = boot_params;
-
efi_system_table = sys_table_arg;
/* Check if we were booted by the EFI firmware */
if (efi_system_table->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE)
efi_exit(handle, EFI_INVALID_PARAMETER);
+ if (!IS_ENABLED(CONFIG_EFI_HANDOVER_PROTOCOL) || !boot_params) {
+ status = efi_allocate_bootparams(handle, &boot_params);
+ if (status != EFI_SUCCESS)
+ efi_exit(handle, status);
+ }
+
+ hdr = &boot_params->hdr;
+
if (have_unsupported_snp_features())
efi_exit(handle, EFI_UNSUPPORTED);
@@ -870,7 +870,7 @@ void __noreturn efi_stub_entry(efi_handle_t handle,
if (efi_mem_encrypt > 0)
hdr->xloadflags |= XLF_MEM_ENCRYPTION;
- status = efi_decompress_kernel(&kernel_entry);
+ status = efi_decompress_kernel(&kernel_entry, boot_params);
if (status != EFI_SUCCESS) {
efi_err("Failed to decompress kernel\n");
goto fail;
@@ -940,6 +940,12 @@ fail:
efi_exit(handle, status);
}
+efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
+ efi_system_table_t *sys_table_arg)
+{
+ efi_stub_entry(handle, sys_table_arg, NULL);
+}
+
#ifdef CONFIG_EFI_HANDOVER_PROTOCOL
void efi_handover_entry(efi_handle_t handle, efi_system_table_t *sys_table_arg,
struct boot_params *boot_params)
diff --git a/drivers/firmware/efi/libstub/zboot-decompress-gzip.c b/drivers/firmware/efi/libstub/zboot-decompress-gzip.c
new file mode 100644
index 000000000000..e97a7e9d3c98
--- /dev/null
+++ b/drivers/firmware/efi/libstub/zboot-decompress-gzip.c
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/efi.h>
+#include <linux/zlib.h>
+
+#include <asm/efi.h>
+
+#include "efistub.h"
+
+#include "inftrees.c"
+#include "inffast.c"
+#include "inflate.c"
+
+extern unsigned char _gzdata_start[], _gzdata_end[];
+extern u32 __aligned(1) payload_size;
+
+static struct z_stream_s stream;
+
+efi_status_t efi_zboot_decompress_init(unsigned long *alloc_size)
+{
+ efi_status_t status;
+ int rc;
+
+ /* skip the 10 byte header, assume no recorded filename */
+ stream.next_in = _gzdata_start + 10;
+ stream.avail_in = _gzdata_end - stream.next_in;
+
+ status = efi_allocate_pages(zlib_inflate_workspacesize(),
+ (unsigned long *)&stream.workspace,
+ ULONG_MAX);
+ if (status != EFI_SUCCESS)
+ return status;
+
+ rc = zlib_inflateInit2(&stream, -MAX_WBITS);
+ if (rc != Z_OK) {
+ efi_err("failed to initialize GZIP decompressor: %d\n", rc);
+ status = EFI_LOAD_ERROR;
+ goto out;
+ }
+
+ *alloc_size = payload_size;
+ return EFI_SUCCESS;
+out:
+ efi_free(zlib_inflate_workspacesize(), (unsigned long)stream.workspace);
+ return status;
+}
+
+efi_status_t efi_zboot_decompress(u8 *out, unsigned long outlen)
+{
+ int rc;
+
+ stream.next_out = out;
+ stream.avail_out = outlen;
+
+ rc = zlib_inflate(&stream, 0);
+ zlib_inflateEnd(&stream);
+
+ efi_free(zlib_inflate_workspacesize(), (unsigned long)stream.workspace);
+
+ if (rc != Z_STREAM_END) {
+ efi_err("GZIP decompression failed with status %d\n", rc);
+ return EFI_LOAD_ERROR;
+ }
+
+ efi_cache_sync_image((unsigned long)out, outlen);
+
+ return EFI_SUCCESS;
+}
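
The 10-byte skip and the negative window-bits argument above go together: the zboot gzip payload is generated without a stored file name, so the fixed-size gzip member header can simply be stepped over, and passing -MAX_WBITS to zlib_inflateInit2() makes zlib decode the raw deflate stream that follows instead of looking for a gzip/zlib wrapper. For illustration only (this struct is not part of the kernel sources), the header being skipped is:

#include <stdint.h>

/* Fixed gzip member header (RFC 1952) when FLG == 0, i.e. no extra
 * field, file name or comment -- exactly the 10 bytes skipped above. */
struct gzip_fixed_header {
	uint8_t  id1;		/* 0x1f */
	uint8_t  id2;		/* 0x8b */
	uint8_t  cm;		/* 8 = deflate */
	uint8_t  flg;		/* 0: no optional fields follow */
	uint32_t mtime;		/* modification time, may be zero */
	uint8_t  xfl;		/* extra flags */
	uint8_t  os;		/* originating operating system */
} __attribute__((packed));	/* sizeof() == 10 */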
diff --git a/drivers/firmware/efi/libstub/zboot-decompress-zstd.c b/drivers/firmware/efi/libstub/zboot-decompress-zstd.c
new file mode 100644
index 000000000000..bde9d94dd2e3
--- /dev/null
+++ b/drivers/firmware/efi/libstub/zboot-decompress-zstd.c
@@ -0,0 +1,49 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/efi.h>
+#include <linux/zstd.h>
+
+#include <asm/efi.h>
+
+#include "decompress_sources.h"
+#include "efistub.h"
+
+extern unsigned char _gzdata_start[], _gzdata_end[];
+extern u32 __aligned(1) payload_size;
+
+static size_t wksp_size;
+static void *wksp;
+
+efi_status_t efi_zboot_decompress_init(unsigned long *alloc_size)
+{
+ efi_status_t status;
+
+ wksp_size = zstd_dctx_workspace_bound();
+ status = efi_allocate_pages(wksp_size, (unsigned long *)&wksp, ULONG_MAX);
+ if (status != EFI_SUCCESS)
+ return status;
+
+ *alloc_size = payload_size;
+ return EFI_SUCCESS;
+}
+
+efi_status_t efi_zboot_decompress(u8 *out, unsigned long outlen)
+{
+ zstd_dctx *dctx = zstd_init_dctx(wksp, wksp_size);
+ size_t ret;
+ int retval;
+
+ ret = zstd_decompress_dctx(dctx, out, outlen, _gzdata_start,
+ _gzdata_end - _gzdata_start - 4);
+ efi_free(wksp_size, (unsigned long)wksp);
+
+ retval = zstd_get_error_code(ret);
+ if (retval) {
+ efi_err("ZSTD-decompression failed with status %d\n", retval);
+ return EFI_LOAD_ERROR;
+ }
+
+ efi_cache_sync_image((unsigned long)out, outlen);
+
+ return EFI_SUCCESS;
+}
diff --git a/drivers/firmware/efi/libstub/zboot.c b/drivers/firmware/efi/libstub/zboot.c
index af23b3c50228..c47ace06f010 100644
--- a/drivers/firmware/efi/libstub/zboot.c
+++ b/drivers/firmware/efi/libstub/zboot.c
@@ -7,36 +7,6 @@
#include "efistub.h"
-static unsigned char zboot_heap[SZ_256K] __aligned(64);
-static unsigned long free_mem_ptr, free_mem_end_ptr;
-
-#define STATIC static
-#if defined(CONFIG_KERNEL_GZIP)
-#include "../../../../lib/decompress_inflate.c"
-#elif defined(CONFIG_KERNEL_LZ4)
-#include "../../../../lib/decompress_unlz4.c"
-#elif defined(CONFIG_KERNEL_LZMA)
-#include "../../../../lib/decompress_unlzma.c"
-#elif defined(CONFIG_KERNEL_LZO)
-#include "../../../../lib/decompress_unlzo.c"
-#elif defined(CONFIG_KERNEL_XZ)
-#undef memcpy
-#define memcpy memcpy
-#undef memmove
-#define memmove memmove
-#include "../../../../lib/decompress_unxz.c"
-#elif defined(CONFIG_KERNEL_ZSTD)
-#include "../../../../lib/decompress_unzstd.c"
-#endif
-
-extern char efi_zboot_header[];
-extern char _gzdata_start[], _gzdata_end[];
-
-static void error(char *x)
-{
- efi_err("EFI decompressor: %s\n", x);
-}
-
static unsigned long alloc_preferred_address(unsigned long alloc_size)
{
#ifdef EFI_KIMG_PREFERRED_ADDRESS
@@ -64,22 +34,17 @@ struct screen_info *alloc_screen_info(void)
asmlinkage efi_status_t __efiapi
efi_zboot_entry(efi_handle_t handle, efi_system_table_t *systab)
{
- unsigned long compressed_size = _gzdata_end - _gzdata_start;
+ char *cmdline_ptr __free(efi_pool) = NULL;
unsigned long image_base, alloc_size;
efi_loaded_image_t *image;
efi_status_t status;
- char *cmdline_ptr;
- int ret;
WRITE_ONCE(efi_system_table, systab);
- free_mem_ptr = (unsigned long)&zboot_heap;
- free_mem_end_ptr = free_mem_ptr + sizeof(zboot_heap);
-
status = efi_bs_call(handle_protocol, handle,
&LOADED_IMAGE_PROTOCOL_GUID, (void **)&image);
if (status != EFI_SUCCESS) {
- error("Failed to locate parent's loaded image protocol");
+ efi_err("Failed to locate parent's loaded image protocol\n");
return status;
}
@@ -89,9 +54,9 @@ efi_zboot_entry(efi_handle_t handle, efi_system_table_t *systab)
efi_info("Decompressing Linux Kernel...\n");
- // SizeOfImage from the compressee's PE/COFF header
- alloc_size = round_up(get_unaligned_le32(_gzdata_end - 4),
- EFI_ALLOC_ALIGN);
+ status = efi_zboot_decompress_init(&alloc_size);
+ if (status != EFI_SUCCESS)
+ return status;
// If the architecture has a preferred address for the image,
// try that first.
@@ -122,26 +87,14 @@ efi_zboot_entry(efi_handle_t handle, efi_system_table_t *systab)
seed, EFI_LOADER_CODE, 0, EFI_ALLOC_LIMIT);
if (status != EFI_SUCCESS) {
efi_err("Failed to allocate memory\n");
- goto free_cmdline;
+ return status;
}
}
- // Decompress the payload into the newly allocated buffer.
- ret = __decompress(_gzdata_start, compressed_size, NULL, NULL,
- (void *)image_base, alloc_size, NULL, error);
- if (ret < 0) {
- error("Decompression failed");
- status = EFI_DEVICE_ERROR;
- goto free_image;
- }
-
- efi_cache_sync_image(image_base, alloc_size);
-
- status = efi_stub_common(handle, image, image_base, cmdline_ptr);
+ // Decompress the payload into the newly allocated buffer
+ status = efi_zboot_decompress((void *)image_base, alloc_size) ?:
+ efi_stub_common(handle, image, image_base, cmdline_ptr);
-free_image:
efi_free(alloc_size, image_base);
-free_cmdline:
- efi_bs_call(free_pool, cmdline_ptr);
return status;
}
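
After this refactor the zboot core is decompressor-agnostic: it calls efi_zboot_decompress_init() to learn the required output size (and to let the backend allocate any scratch memory), allocates the destination buffer itself, and then calls efi_zboot_decompress() to fill it. A skeletal backend, hypothetical and not in-tree, showing the contract the gzip and zstd implementations earlier in this diff follow:

/* Hypothetical skeleton only -- the real backends are
 * zboot-decompress-gzip.c and zboot-decompress-zstd.c above. */
#include <linux/efi.h>
#include <asm/efi.h>
#include "efistub.h"

extern unsigned char _gzdata_start[], _gzdata_end[];
extern u32 __aligned(1) payload_size;	/* provided by zboot.lds */

efi_status_t efi_zboot_decompress_init(unsigned long *alloc_size)
{
	/* Report how large the output allocation must be. */
	*alloc_size = payload_size;
	return EFI_SUCCESS;
}

efi_status_t efi_zboot_decompress(u8 *out, unsigned long outlen)
{
	/* Decode _gzdata_start.._gzdata_end into out[0..outlen) here,
	 * then make the result coherent for execution. */
	efi_cache_sync_image((unsigned long)out, outlen);
	return EFI_SUCCESS;
}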
diff --git a/drivers/firmware/efi/libstub/zboot.lds b/drivers/firmware/efi/libstub/zboot.lds
index af2c82f7bd90..9ecc57ff5b45 100644
--- a/drivers/firmware/efi/libstub/zboot.lds
+++ b/drivers/firmware/efi/libstub/zboot.lds
@@ -17,6 +17,7 @@ SECTIONS
.rodata : ALIGN(8) {
__efistub__gzdata_start = .;
*(.gzdata)
+ __efistub_payload_size = . - 4;
__efistub__gzdata_end = .;
*(.rodata* .init.rodata* .srodata*)
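
The new __efistub_payload_size symbol lands four bytes before the end of the .gzdata blob, i.e. on the trailing 32-bit little-endian SizeOfImage value that zboot.c previously fetched by hand. Assuming a little-endian target (true for the current zboot users), the two reads below return the same number; the helper name is invented for the comparison:

#include <linux/unaligned.h>

extern unsigned char _gzdata_start[], _gzdata_end[];
extern u32 __aligned(1) payload_size;

static u32 payload_size_compat(void)
{
	/* What the old zboot.c code did explicitly; the new payload_size
	 * symbol now aliases exactly these four bytes. */
	return get_unaligned_le32(_gzdata_end - 4);
}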
diff --git a/drivers/firmware/efi/mokvar-table.c b/drivers/firmware/efi/mokvar-table.c
index 208db29613c6..0a856c3f69a3 100644
--- a/drivers/firmware/efi/mokvar-table.c
+++ b/drivers/firmware/efi/mokvar-table.c
@@ -263,7 +263,7 @@ struct efi_mokvar_table_entry *efi_mokvar_entry_find(const char *name)
* amount of data in this mokvar config table entry.
*/
static ssize_t efi_mokvar_sysfs_read(struct file *file, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct efi_mokvar_table_entry *mokvar_entry = bin_attr->private;
@@ -340,7 +340,7 @@ static int __init efi_mokvar_sysfs_init(void)
mokvar_sysfs->bin_attr.attr.name = mokvar_entry->name;
mokvar_sysfs->bin_attr.attr.mode = 0400;
mokvar_sysfs->bin_attr.size = mokvar_entry->data_size;
- mokvar_sysfs->bin_attr.read = efi_mokvar_sysfs_read;
+ mokvar_sysfs->bin_attr.read_new = efi_mokvar_sysfs_read;
err = sysfs_create_bin_file(mokvar_kobj,
&mokvar_sysfs->bin_attr);
diff --git a/drivers/firmware/efi/rci2-table.c b/drivers/firmware/efi/rci2-table.c
index 4fd45d6f69a4..c1bedd244817 100644
--- a/drivers/firmware/efi/rci2-table.c
+++ b/drivers/firmware/efi/rci2-table.c
@@ -40,7 +40,7 @@ static u8 *rci2_base;
static u32 rci2_table_len;
unsigned long rci2_table_phys __ro_after_init = EFI_INVALID_TABLE_ADDR;
-static BIN_ATTR_SIMPLE_ADMIN_RO(rci2);
+static __ro_after_init BIN_ATTR_SIMPLE_ADMIN_RO(rci2);
static u16 checksum(void)
{
diff --git a/drivers/firmware/imx/imx-scu.c b/drivers/firmware/imx/imx-scu.c
index 1dd4362ef9a3..8c28e25ddc8a 100644
--- a/drivers/firmware/imx/imx-scu.c
+++ b/drivers/firmware/imx/imx-scu.c
@@ -280,6 +280,7 @@ static int imx_scu_probe(struct platform_device *pdev)
return ret;
sc_ipc->fast_ipc = of_device_is_compatible(args.np, "fsl,imx8-mu-scu");
+ of_node_put(args.np);
num_channel = sc_ipc->fast_ipc ? 2 : SCU_MU_CHAN_NUM;
for (i = 0; i < num_channel; i++) {
diff --git a/drivers/firmware/psci/psci_checker.c b/drivers/firmware/psci/psci_checker.c
index 116eb465cdb4..b662b7e28b80 100644
--- a/drivers/firmware/psci/psci_checker.c
+++ b/drivers/firmware/psci/psci_checker.c
@@ -342,7 +342,7 @@ static int suspend_test_thread(void *arg)
* Disable the timer to make sure that the timer will not trigger
* later.
*/
- del_timer(&wakeup_timer);
+ timer_delete(&wakeup_timer);
destroy_timer_on_stack(&wakeup_timer);
if (atomic_dec_return_relaxed(&nb_active_threads) == 0)
diff --git a/drivers/firmware/qcom/qcom_qseecom_uefisecapp.c b/drivers/firmware/qcom/qcom_qseecom_uefisecapp.c
index 447246bd04be..98a463e9774b 100644
--- a/drivers/firmware/qcom/qcom_qseecom_uefisecapp.c
+++ b/drivers/firmware/qcom/qcom_qseecom_uefisecapp.c
@@ -814,15 +814,6 @@ static int qcom_uefisecapp_probe(struct auxiliary_device *aux_dev,
qcuefi->client = container_of(aux_dev, struct qseecom_client, aux_dev);
- auxiliary_set_drvdata(aux_dev, qcuefi);
- status = qcuefi_set_reference(qcuefi);
- if (status)
- return status;
-
- status = efivars_register(&qcuefi->efivars, &qcom_efivar_ops);
- if (status)
- qcuefi_set_reference(NULL);
-
memset(&pool_config, 0, sizeof(pool_config));
pool_config.initial_size = SZ_4K;
pool_config.policy = QCOM_TZMEM_POLICY_MULTIPLIER;
@@ -833,6 +824,15 @@ static int qcom_uefisecapp_probe(struct auxiliary_device *aux_dev,
if (IS_ERR(qcuefi->mempool))
return PTR_ERR(qcuefi->mempool);
+ auxiliary_set_drvdata(aux_dev, qcuefi);
+ status = qcuefi_set_reference(qcuefi);
+ if (status)
+ return status;
+
+ status = efivars_register(&qcuefi->efivars, &qcom_efivar_ops);
+ if (status)
+ qcuefi_set_reference(NULL);
+
return status;
}
diff --git a/drivers/firmware/qcom/qcom_scm.c b/drivers/firmware/qcom/qcom_scm.c
index f0569bb9411f..fc4d67e4c4a6 100644
--- a/drivers/firmware/qcom/qcom_scm.c
+++ b/drivers/firmware/qcom/qcom_scm.c
@@ -2301,8 +2301,8 @@ static int qcom_scm_probe(struct platform_device *pdev)
__scm->mempool = devm_qcom_tzmem_pool_new(__scm->dev, &pool_config);
if (IS_ERR(__scm->mempool)) {
- dev_err_probe(__scm->dev, PTR_ERR(__scm->mempool),
- "Failed to create the SCM memory pool\n");
+ ret = dev_err_probe(__scm->dev, PTR_ERR(__scm->mempool),
+ "Failed to create the SCM memory pool\n");
goto err;
}
diff --git a/drivers/firmware/qemu_fw_cfg.c b/drivers/firmware/qemu_fw_cfg.c
index d58da3e4500a..2615fb780e3c 100644
--- a/drivers/firmware/qemu_fw_cfg.c
+++ b/drivers/firmware/qemu_fw_cfg.c
@@ -460,7 +460,7 @@ static const struct kobj_type fw_cfg_sysfs_entry_ktype = {
/* raw-read method and attribute */
static ssize_t fw_cfg_sysfs_read_raw(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t pos, size_t count)
{
struct fw_cfg_sysfs_entry *entry = to_entry(kobj);
@@ -474,9 +474,9 @@ static ssize_t fw_cfg_sysfs_read_raw(struct file *filp, struct kobject *kobj,
return fw_cfg_read_blob(entry->select, buf, pos, count);
}
-static struct bin_attribute fw_cfg_sysfs_attr_raw = {
+static const struct bin_attribute fw_cfg_sysfs_attr_raw = {
.attr = { .name = "raw", .mode = S_IRUSR },
- .read = fw_cfg_sysfs_read_raw,
+ .read_new = fw_cfg_sysfs_read_raw,
};
/*
diff --git a/drivers/firmware/samsung/Kconfig b/drivers/firmware/samsung/Kconfig
new file mode 100644
index 000000000000..16d81aeb1d41
--- /dev/null
+++ b/drivers/firmware/samsung/Kconfig
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config EXYNOS_ACPM_PROTOCOL
+ tristate "Exynos Alive Clock and Power Manager (ACPM) Message Protocol"
+ depends on ARCH_EXYNOS || COMPILE_TEST
+ depends on MAILBOX
+ help
+ Alive Clock and Power Manager (ACPM) Message Protocol is defined for
+ the purpose of communication between the ACPM firmware and masters
+ (AP, AOC, ...). ACPM firmware operates on the Active Power Management
+ (APM) module that handles overall power activities.
+
+ This protocol driver provides an interface for all the client drivers
+ making use of the features offered by the APM.
diff --git a/drivers/firmware/samsung/Makefile b/drivers/firmware/samsung/Makefile
new file mode 100644
index 000000000000..7b4c9f6f34f5
--- /dev/null
+++ b/drivers/firmware/samsung/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+acpm-protocol-objs := exynos-acpm.o exynos-acpm-pmic.o
+obj-$(CONFIG_EXYNOS_ACPM_PROTOCOL) += acpm-protocol.o
diff --git a/drivers/firmware/samsung/exynos-acpm-pmic.c b/drivers/firmware/samsung/exynos-acpm-pmic.c
new file mode 100644
index 000000000000..85e90d236da2
--- /dev/null
+++ b/drivers/firmware/samsung/exynos-acpm-pmic.c
@@ -0,0 +1,224 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2020 Samsung Electronics Co., Ltd.
+ * Copyright 2020 Google LLC.
+ * Copyright 2024 Linaro Ltd.
+ */
+#include <linux/bitfield.h>
+#include <linux/firmware/samsung/exynos-acpm-protocol.h>
+#include <linux/ktime.h>
+#include <linux/types.h>
+
+#include "exynos-acpm.h"
+#include "exynos-acpm-pmic.h"
+
+#define ACPM_PMIC_CHANNEL GENMASK(15, 12)
+#define ACPM_PMIC_TYPE GENMASK(11, 8)
+#define ACPM_PMIC_REG GENMASK(7, 0)
+
+#define ACPM_PMIC_RETURN GENMASK(31, 24)
+#define ACPM_PMIC_MASK GENMASK(23, 16)
+#define ACPM_PMIC_VALUE GENMASK(15, 8)
+#define ACPM_PMIC_FUNC GENMASK(7, 0)
+
+#define ACPM_PMIC_BULK_SHIFT 8
+#define ACPM_PMIC_BULK_MASK GENMASK(7, 0)
+#define ACPM_PMIC_BULK_MAX_COUNT 8
+
+enum exynos_acpm_pmic_func {
+ ACPM_PMIC_READ,
+ ACPM_PMIC_WRITE,
+ ACPM_PMIC_UPDATE,
+ ACPM_PMIC_BULK_READ,
+ ACPM_PMIC_BULK_WRITE,
+};
+
+static inline u32 acpm_pmic_set_bulk(u32 data, unsigned int i)
+{
+ return (data & ACPM_PMIC_BULK_MASK) << (ACPM_PMIC_BULK_SHIFT * i);
+}
+
+static inline u32 acpm_pmic_get_bulk(u32 data, unsigned int i)
+{
+ return (data >> (ACPM_PMIC_BULK_SHIFT * i)) & ACPM_PMIC_BULK_MASK;
+}
+
+static void acpm_pmic_set_xfer(struct acpm_xfer *xfer, u32 *cmd,
+ unsigned int acpm_chan_id)
+{
+ xfer->txd = cmd;
+ xfer->rxd = cmd;
+ xfer->txlen = sizeof(cmd);
+ xfer->rxlen = sizeof(cmd);
+ xfer->acpm_chan_id = acpm_chan_id;
+}
+
+static void acpm_pmic_init_read_cmd(u32 cmd[4], u8 type, u8 reg, u8 chan)
+{
+ cmd[0] = FIELD_PREP(ACPM_PMIC_TYPE, type) |
+ FIELD_PREP(ACPM_PMIC_REG, reg) |
+ FIELD_PREP(ACPM_PMIC_CHANNEL, chan);
+ cmd[1] = FIELD_PREP(ACPM_PMIC_FUNC, ACPM_PMIC_READ);
+ cmd[3] = ktime_to_ms(ktime_get());
+}
+
+int acpm_pmic_read_reg(const struct acpm_handle *handle,
+ unsigned int acpm_chan_id, u8 type, u8 reg, u8 chan,
+ u8 *buf)
+{
+ struct acpm_xfer xfer;
+ u32 cmd[4] = {0};
+ int ret;
+
+ acpm_pmic_init_read_cmd(cmd, type, reg, chan);
+ acpm_pmic_set_xfer(&xfer, cmd, acpm_chan_id);
+
+ ret = acpm_do_xfer(handle, &xfer);
+ if (ret)
+ return ret;
+
+ *buf = FIELD_GET(ACPM_PMIC_VALUE, xfer.rxd[1]);
+
+ return FIELD_GET(ACPM_PMIC_RETURN, xfer.rxd[1]);
+}
+
+static void acpm_pmic_init_bulk_read_cmd(u32 cmd[4], u8 type, u8 reg, u8 chan,
+ u8 count)
+{
+ cmd[0] = FIELD_PREP(ACPM_PMIC_TYPE, type) |
+ FIELD_PREP(ACPM_PMIC_REG, reg) |
+ FIELD_PREP(ACPM_PMIC_CHANNEL, chan);
+ cmd[1] = FIELD_PREP(ACPM_PMIC_FUNC, ACPM_PMIC_BULK_READ) |
+ FIELD_PREP(ACPM_PMIC_VALUE, count);
+}
+
+int acpm_pmic_bulk_read(const struct acpm_handle *handle,
+ unsigned int acpm_chan_id, u8 type, u8 reg, u8 chan,
+ u8 count, u8 *buf)
+{
+ struct acpm_xfer xfer;
+ u32 cmd[4] = {0};
+ int i, ret;
+
+ if (count > ACPM_PMIC_BULK_MAX_COUNT)
+ return -EINVAL;
+
+ acpm_pmic_init_bulk_read_cmd(cmd, type, reg, chan, count);
+ acpm_pmic_set_xfer(&xfer, cmd, acpm_chan_id);
+
+ ret = acpm_do_xfer(handle, &xfer);
+ if (ret)
+ return ret;
+
+ ret = FIELD_GET(ACPM_PMIC_RETURN, xfer.rxd[1]);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < count; i++) {
+ if (i < 4)
+ buf[i] = acpm_pmic_get_bulk(xfer.rxd[2], i);
+ else
+ buf[i] = acpm_pmic_get_bulk(xfer.rxd[3], i - 4);
+ }
+
+ return 0;
+}
+
+static void acpm_pmic_init_write_cmd(u32 cmd[4], u8 type, u8 reg, u8 chan,
+ u8 value)
+{
+ cmd[0] = FIELD_PREP(ACPM_PMIC_TYPE, type) |
+ FIELD_PREP(ACPM_PMIC_REG, reg) |
+ FIELD_PREP(ACPM_PMIC_CHANNEL, chan);
+ cmd[1] = FIELD_PREP(ACPM_PMIC_FUNC, ACPM_PMIC_WRITE) |
+ FIELD_PREP(ACPM_PMIC_VALUE, value);
+ cmd[3] = ktime_to_ms(ktime_get());
+}
+
+int acpm_pmic_write_reg(const struct acpm_handle *handle,
+ unsigned int acpm_chan_id, u8 type, u8 reg, u8 chan,
+ u8 value)
+{
+ struct acpm_xfer xfer;
+ u32 cmd[4] = {0};
+ int ret;
+
+ acpm_pmic_init_write_cmd(cmd, type, reg, chan, value);
+ acpm_pmic_set_xfer(&xfer, cmd, acpm_chan_id);
+
+ ret = acpm_do_xfer(handle, &xfer);
+ if (ret)
+ return ret;
+
+ return FIELD_GET(ACPM_PMIC_RETURN, xfer.rxd[1]);
+}
+
+static void acpm_pmic_init_bulk_write_cmd(u32 cmd[4], u8 type, u8 reg, u8 chan,
+ u8 count, const u8 *buf)
+{
+ int i;
+
+ cmd[0] = FIELD_PREP(ACPM_PMIC_TYPE, type) |
+ FIELD_PREP(ACPM_PMIC_REG, reg) |
+ FIELD_PREP(ACPM_PMIC_CHANNEL, chan);
+ cmd[1] = FIELD_PREP(ACPM_PMIC_FUNC, ACPM_PMIC_BULK_WRITE) |
+ FIELD_PREP(ACPM_PMIC_VALUE, count);
+
+ for (i = 0; i < count; i++) {
+ if (i < 4)
+ cmd[2] |= acpm_pmic_set_bulk(buf[i], i);
+ else
+ cmd[3] |= acpm_pmic_set_bulk(buf[i], i - 4);
+ }
+}
+
+int acpm_pmic_bulk_write(const struct acpm_handle *handle,
+ unsigned int acpm_chan_id, u8 type, u8 reg, u8 chan,
+ u8 count, const u8 *buf)
+{
+ struct acpm_xfer xfer;
+ u32 cmd[4] = {0};
+ int ret;
+
+ if (count > ACPM_PMIC_BULK_MAX_COUNT)
+ return -EINVAL;
+
+ acpm_pmic_init_bulk_write_cmd(cmd, type, reg, chan, count, buf);
+ acpm_pmic_set_xfer(&xfer, cmd, acpm_chan_id);
+
+ ret = acpm_do_xfer(handle, &xfer);
+ if (ret)
+ return ret;
+
+ return FIELD_GET(ACPM_PMIC_RETURN, xfer.rxd[1]);
+}
+
+static void acpm_pmic_init_update_cmd(u32 cmd[4], u8 type, u8 reg, u8 chan,
+ u8 value, u8 mask)
+{
+ cmd[0] = FIELD_PREP(ACPM_PMIC_TYPE, type) |
+ FIELD_PREP(ACPM_PMIC_REG, reg) |
+ FIELD_PREP(ACPM_PMIC_CHANNEL, chan);
+ cmd[1] = FIELD_PREP(ACPM_PMIC_FUNC, ACPM_PMIC_UPDATE) |
+ FIELD_PREP(ACPM_PMIC_VALUE, value) |
+ FIELD_PREP(ACPM_PMIC_MASK, mask);
+ cmd[3] = ktime_to_ms(ktime_get());
+}
+
+int acpm_pmic_update_reg(const struct acpm_handle *handle,
+ unsigned int acpm_chan_id, u8 type, u8 reg, u8 chan,
+ u8 value, u8 mask)
+{
+ struct acpm_xfer xfer;
+ u32 cmd[4] = {0};
+ int ret;
+
+ acpm_pmic_init_update_cmd(cmd, type, reg, chan, value, mask);
+ acpm_pmic_set_xfer(&xfer, cmd, acpm_chan_id);
+
+ ret = acpm_do_xfer(handle, &xfer);
+ if (ret)
+ return ret;
+
+ return FIELD_GET(ACPM_PMIC_RETURN, xfer.rxd[1]);
+}
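
Each ACPM PMIC request is four 32-bit words packed with the bit fields defined at the top of this file. As a worked example (the type, register and channel numbers are invented), a read of register 0x23 on PMIC type 0x1, channel 0x2 is composed as follows, mirroring acpm_pmic_init_read_cmd():

#include <linux/bitfield.h>

u32 cmd0 = FIELD_PREP(ACPM_PMIC_CHANNEL, 0x2) |	/* bits 15:12 -> 0x2000 */
	   FIELD_PREP(ACPM_PMIC_TYPE, 0x1) |	/* bits 11:8  -> 0x0100 */
	   FIELD_PREP(ACPM_PMIC_REG, 0x23);	/* bits 7:0   -> 0x0023 */
/* cmd0 == 0x2123.  cmd[1] carries the function code (ACPM_PMIC_READ == 0)
 * and cmd[3] a millisecond timestamp.  The firmware writes its reply back
 * into the same words: the register value lands in bits 15:8 of rxd[1]
 * and the PMIC return code in bits 31:24. */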
diff --git a/drivers/firmware/samsung/exynos-acpm-pmic.h b/drivers/firmware/samsung/exynos-acpm-pmic.h
new file mode 100644
index 000000000000..078421888a14
--- /dev/null
+++ b/drivers/firmware/samsung/exynos-acpm-pmic.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2020 Samsung Electronics Co., Ltd.
+ * Copyright 2020 Google LLC.
+ * Copyright 2024 Linaro Ltd.
+ */
+#ifndef __EXYNOS_ACPM_PMIC_H__
+#define __EXYNOS_ACPM_PMIC_H__
+
+#include <linux/types.h>
+
+struct acpm_handle;
+
+int acpm_pmic_read_reg(const struct acpm_handle *handle,
+ unsigned int acpm_chan_id, u8 type, u8 reg, u8 chan,
+ u8 *buf);
+int acpm_pmic_bulk_read(const struct acpm_handle *handle,
+ unsigned int acpm_chan_id, u8 type, u8 reg, u8 chan,
+ u8 count, u8 *buf);
+int acpm_pmic_write_reg(const struct acpm_handle *handle,
+ unsigned int acpm_chan_id, u8 type, u8 reg, u8 chan,
+ u8 value);
+int acpm_pmic_bulk_write(const struct acpm_handle *handle,
+ unsigned int acpm_chan_id, u8 type, u8 reg, u8 chan,
+ u8 count, const u8 *buf);
+int acpm_pmic_update_reg(const struct acpm_handle *handle,
+ unsigned int acpm_chan_id, u8 type, u8 reg, u8 chan,
+ u8 value, u8 mask);
+#endif /* __EXYNOS_ACPM_PMIC_H__ */
diff --git a/drivers/firmware/samsung/exynos-acpm.c b/drivers/firmware/samsung/exynos-acpm.c
new file mode 100644
index 000000000000..a85b2dbdd9f0
--- /dev/null
+++ b/drivers/firmware/samsung/exynos-acpm.c
@@ -0,0 +1,769 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2020 Samsung Electronics Co., Ltd.
+ * Copyright 2020 Google LLC.
+ * Copyright 2024 Linaro Ltd.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bitmap.h>
+#include <linux/bits.h>
+#include <linux/cleanup.h>
+#include <linux/container_of.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/firmware/samsung/exynos-acpm-protocol.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/mailbox/exynos-message.h>
+#include <linux/mailbox_client.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/math.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include "exynos-acpm.h"
+#include "exynos-acpm-pmic.h"
+
+#define ACPM_PROTOCOL_SEQNUM GENMASK(21, 16)
+
+/* The polling counter counts in units of 20 us: 5000 * 20 us = 100 ms */
+#define ACPM_POLL_TIMEOUT 5000
+#define ACPM_TX_TIMEOUT_US 500000
+
+#define ACPM_GS101_INITDATA_BASE 0xa000
+
+/**
+ * struct acpm_shmem - shared memory configuration information.
+ * @reserved: unused fields.
+ * @chans: offset to array of struct acpm_chan_shmem.
+ * @reserved1: unused fields.
+ * @num_chans: number of channels.
+ */
+struct acpm_shmem {
+ u32 reserved[2];
+ u32 chans;
+ u32 reserved1[3];
+ u32 num_chans;
+};
+
+/**
+ * struct acpm_chan_shmem - descriptor of a shared memory channel.
+ *
+ * @id: channel ID.
+ * @reserved: unused fields.
+ * @rx_rear: rear pointer of APM RX queue (TX for AP).
+ * @rx_front: front pointer of APM RX queue (TX for AP).
+ * @rx_base: base address of APM RX queue (TX for AP).
+ * @reserved1: unused fields.
+ * @tx_rear: rear pointer of APM TX queue (RX for AP).
+ * @tx_front: front pointer of APM TX queue (RX for AP).
+ * @tx_base: base address of APM TX queue (RX for AP).
+ * @qlen: queue length. Applies to both TX/RX queues.
+ * @mlen: message length. Applies to both TX/RX queues.
+ * @reserved2: unused fields.
+ * @poll_completion: true when the channel works on polling.
+ */
+struct acpm_chan_shmem {
+ u32 id;
+ u32 reserved[3];
+ u32 rx_rear;
+ u32 rx_front;
+ u32 rx_base;
+ u32 reserved1[3];
+ u32 tx_rear;
+ u32 tx_front;
+ u32 tx_base;
+ u32 qlen;
+ u32 mlen;
+ u32 reserved2[2];
+ u32 poll_completion;
+};
+
+/**
+ * struct acpm_queue - exynos acpm queue.
+ *
+ * @rear: rear address of the queue.
+ * @front: front address of the queue.
+ * @base: base address of the queue.
+ */
+struct acpm_queue {
+ void __iomem *rear;
+ void __iomem *front;
+ void __iomem *base;
+};
+
+/**
+ * struct acpm_rx_data - RX queue data.
+ *
+ * @cmd: pointer to where the data shall be saved.
+ * @n_cmd: number of 32-bit commands.
+ * @response: true if the client expects the RX data.
+ */
+struct acpm_rx_data {
+ u32 *cmd;
+ size_t n_cmd;
+ bool response;
+};
+
+#define ACPM_SEQNUM_MAX 64
+
+/**
+ * struct acpm_chan - driver internal representation of a channel.
+ * @cl: mailbox client.
+ * @chan: mailbox channel.
+ * @acpm: pointer to driver private data.
+ * @tx: TX queue. The enqueue is done by the host.
+ * - front index is written by the host.
+ * - rear index is written by the firmware.
+ *
+ * @rx: RX queue. The enqueue is done by the firmware.
+ * - front index is written by the firmware.
+ * - rear index is written by the host.
+ * @tx_lock: protects TX queue.
+ * @rx_lock: protects RX queue.
+ * @qlen: queue length. Applies to both TX/RX queues.
+ * @mlen: message length. Applies to both TX/RX queues.
+ * @seqnum: sequence number of the last message enqueued on TX queue.
+ * @id: channel ID.
+ * @poll_completion: indicates if the transfer needs to be polled for
+ * completion or interrupt mode is used.
+ * @bitmap_seqnum: bitmap that tracks the messages on the TX/RX queues.
+ * @rx_data: internal buffer used to drain the RX queue.
+ */
+struct acpm_chan {
+ struct mbox_client cl;
+ struct mbox_chan *chan;
+ struct acpm_info *acpm;
+ struct acpm_queue tx;
+ struct acpm_queue rx;
+ struct mutex tx_lock;
+ struct mutex rx_lock;
+
+ unsigned int qlen;
+ unsigned int mlen;
+ u8 seqnum;
+ u8 id;
+ bool poll_completion;
+
+ DECLARE_BITMAP(bitmap_seqnum, ACPM_SEQNUM_MAX - 1);
+ struct acpm_rx_data rx_data[ACPM_SEQNUM_MAX];
+};
+
+/**
+ * struct acpm_info - driver's private data.
+ * @shmem: pointer to the SRAM configuration data.
+ * @sram_base: base address of SRAM.
+ * @chans: pointer to the ACPM channel parameters retrieved from SRAM.
+ * @dev: pointer to the exynos-acpm device.
+ * @handle: instance of acpm_handle to send to clients.
+ * @num_chans: number of channels available for this controller.
+ */
+struct acpm_info {
+ struct acpm_shmem __iomem *shmem;
+ void __iomem *sram_base;
+ struct acpm_chan *chans;
+ struct device *dev;
+ struct acpm_handle handle;
+ u32 num_chans;
+};
+
+/**
+ * struct acpm_match_data - of_device_id data.
+ * @initdata_base: offset in SRAM where the channels configuration resides.
+ */
+struct acpm_match_data {
+ loff_t initdata_base;
+};
+
+#define client_to_acpm_chan(c) container_of(c, struct acpm_chan, cl)
+#define handle_to_acpm_info(h) container_of(h, struct acpm_info, handle)
+
+/**
+ * acpm_get_rx() - get response from RX queue.
+ * @achan: ACPM channel info.
+ * @xfer: reference to the transfer to get response for.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int acpm_get_rx(struct acpm_chan *achan, const struct acpm_xfer *xfer)
+{
+ u32 rx_front, rx_seqnum, tx_seqnum, seqnum;
+ const void __iomem *base, *addr;
+ struct acpm_rx_data *rx_data;
+ u32 i, val, mlen;
+ bool rx_set = false;
+
+ guard(mutex)(&achan->rx_lock);
+
+ rx_front = readl(achan->rx.front);
+ i = readl(achan->rx.rear);
+
+ /* Bail out if RX is empty. */
+ if (i == rx_front)
+ return 0;
+
+ base = achan->rx.base;
+ mlen = achan->mlen;
+
+ tx_seqnum = FIELD_GET(ACPM_PROTOCOL_SEQNUM, xfer->txd[0]);
+
+ /* Drain RX queue. */
+ do {
+ /* Read RX seqnum. */
+ addr = base + mlen * i;
+ val = readl(addr);
+
+ rx_seqnum = FIELD_GET(ACPM_PROTOCOL_SEQNUM, val);
+ if (!rx_seqnum)
+ return -EIO;
+ /*
+ * mssg seqnum starts with value 1, whereas the driver considers
+ * the first mssg at index 0.
+ */
+ seqnum = rx_seqnum - 1;
+ rx_data = &achan->rx_data[seqnum];
+
+ if (rx_data->response) {
+ if (rx_seqnum == tx_seqnum) {
+ __ioread32_copy(xfer->rxd, addr,
+ xfer->rxlen / 4);
+ rx_set = true;
+ clear_bit(seqnum, achan->bitmap_seqnum);
+ } else {
+ /*
+ * The RX data corresponds to another request.
+ * Save the data to drain the queue, but don't
+ * clear yet the bitmap. It will be cleared
+ * after the response is copied to the request.
+ */
+ __ioread32_copy(rx_data->cmd, addr,
+ xfer->rxlen / 4);
+ }
+ } else {
+ clear_bit(seqnum, achan->bitmap_seqnum);
+ }
+
+ i = (i + 1) % achan->qlen;
+ } while (i != rx_front);
+
+ /* We saved all responses, mark RX empty. */
+ writel(rx_front, achan->rx.rear);
+
+ /*
+ * If the response was not in this iteration of the queue, check if the
+ * RX data was previously saved.
+ */
+ rx_data = &achan->rx_data[tx_seqnum - 1];
+ if (!rx_set && rx_data->response) {
+ rx_seqnum = FIELD_GET(ACPM_PROTOCOL_SEQNUM,
+ rx_data->cmd[0]);
+
+ if (rx_seqnum == tx_seqnum) {
+ memcpy(xfer->rxd, rx_data->cmd, xfer->rxlen);
+ clear_bit(rx_seqnum - 1, achan->bitmap_seqnum);
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * acpm_dequeue_by_polling() - RX dequeue by polling.
+ * @achan: ACPM channel info.
+ * @xfer: reference to the transfer being waited for.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int acpm_dequeue_by_polling(struct acpm_chan *achan,
+ const struct acpm_xfer *xfer)
+{
+ struct device *dev = achan->acpm->dev;
+ unsigned int cnt_20us = 0;
+ u32 seqnum;
+ int ret;
+
+ seqnum = FIELD_GET(ACPM_PROTOCOL_SEQNUM, xfer->txd[0]);
+
+ do {
+ ret = acpm_get_rx(achan, xfer);
+ if (ret)
+ return ret;
+
+ if (!test_bit(seqnum - 1, achan->bitmap_seqnum))
+ return 0;
+
+ /* Determined experimentally. */
+ usleep_range(20, 30);
+ cnt_20us++;
+ } while (cnt_20us < ACPM_POLL_TIMEOUT);
+
+ dev_err(dev, "Timeout! ch:%u s:%u bitmap:%lx, cnt_20us = %d.\n",
+ achan->id, seqnum, achan->bitmap_seqnum[0], cnt_20us);
+
+ return -ETIME;
+}
+
+/**
+ * acpm_wait_for_queue_slots() - wait for queue slots.
+ *
+ * @achan: ACPM channel info.
+ * @next_tx_front: next front index of the TX queue.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int acpm_wait_for_queue_slots(struct acpm_chan *achan, u32 next_tx_front)
+{
+ u32 val, ret;
+
+ /*
+ * Wait for RX front to keep up with TX front. Make sure there's at
+ * least one element between them.
+ */
+ ret = readl_poll_timeout(achan->rx.front, val, next_tx_front != val, 0,
+ ACPM_TX_TIMEOUT_US);
+ if (ret) {
+ dev_err(achan->acpm->dev, "RX front can not keep up with TX front.\n");
+ return ret;
+ }
+
+ ret = readl_poll_timeout(achan->tx.rear, val, next_tx_front != val, 0,
+ ACPM_TX_TIMEOUT_US);
+ if (ret)
+ dev_err(achan->acpm->dev, "TX queue is full.\n");
+
+ return ret;
+}
+
+/**
+ * acpm_prepare_xfer() - prepare a transfer before writing the message to the
+ * TX queue.
+ * @achan: ACPM channel info.
+ * @xfer: reference to the transfer being prepared.
+ */
+static void acpm_prepare_xfer(struct acpm_chan *achan,
+ const struct acpm_xfer *xfer)
+{
+ struct acpm_rx_data *rx_data;
+ u32 *txd = (u32 *)xfer->txd;
+
+ /* Prevent chan->seqnum from being re-used */
+ do {
+ if (++achan->seqnum == ACPM_SEQNUM_MAX)
+ achan->seqnum = 1;
+ } while (test_bit(achan->seqnum - 1, achan->bitmap_seqnum));
+
+ txd[0] |= FIELD_PREP(ACPM_PROTOCOL_SEQNUM, achan->seqnum);
+
+ /* Clear data for upcoming responses */
+ rx_data = &achan->rx_data[achan->seqnum - 1];
+ memset(rx_data->cmd, 0, sizeof(*rx_data->cmd) * rx_data->n_cmd);
+ if (xfer->rxd)
+ rx_data->response = true;
+
+ /* Flag the index based on seqnum. (seqnum: 1~63, bitmap: 0~62) */
+ set_bit(achan->seqnum - 1, achan->bitmap_seqnum);
+}
+
+/**
+ * acpm_wait_for_message_response - a helper to group all possible ways of
+ * waiting for a synchronous message response.
+ *
+ * @achan: ACPM channel info.
+ * @xfer: reference to the transfer being waited for.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int acpm_wait_for_message_response(struct acpm_chan *achan,
+ const struct acpm_xfer *xfer)
+{
+ /* Just polling mode supported for now. */
+ return acpm_dequeue_by_polling(achan, xfer);
+}
+
+/**
+ * acpm_do_xfer() - do one transfer.
+ * @handle: pointer to the acpm handle.
+ * @xfer: transfer to initiate and wait for response.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int acpm_do_xfer(const struct acpm_handle *handle, const struct acpm_xfer *xfer)
+{
+ struct acpm_info *acpm = handle_to_acpm_info(handle);
+ struct exynos_mbox_msg msg;
+ struct acpm_chan *achan;
+ u32 idx, tx_front;
+ int ret;
+
+ if (xfer->acpm_chan_id >= acpm->num_chans)
+ return -EINVAL;
+
+ achan = &acpm->chans[xfer->acpm_chan_id];
+
+ if (!xfer->txd || xfer->txlen > achan->mlen || xfer->rxlen > achan->mlen)
+ return -EINVAL;
+
+ if (!achan->poll_completion) {
+ dev_err(achan->acpm->dev, "Interrupt mode not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ scoped_guard(mutex, &achan->tx_lock) {
+ tx_front = readl(achan->tx.front);
+ idx = (tx_front + 1) % achan->qlen;
+
+ ret = acpm_wait_for_queue_slots(achan, idx);
+ if (ret)
+ return ret;
+
+ acpm_prepare_xfer(achan, xfer);
+
+ /* Write TX command. */
+ __iowrite32_copy(achan->tx.base + achan->mlen * tx_front,
+ xfer->txd, xfer->txlen / 4);
+
+ /* Advance TX front. */
+ writel(idx, achan->tx.front);
+ }
+
+ msg.chan_id = xfer->acpm_chan_id;
+ msg.chan_type = EXYNOS_MBOX_CHAN_TYPE_DOORBELL;
+ ret = mbox_send_message(achan->chan, (void *)&msg);
+ if (ret < 0)
+ return ret;
+
+ ret = acpm_wait_for_message_response(achan, xfer);
+
+ /*
+ * NOTE: we might prefer not to need the mailbox ticker to manage the
+ * transfer queueing since the protocol layer queues things by itself.
+ * Unfortunately, we have to kick the mailbox framework after we have
+ * received our message.
+ */
+ mbox_client_txdone(achan->chan, ret);
+
+ return ret;
+}
+
+/**
+ * acpm_chan_shmem_get_params() - get channel parameters and addresses of the
+ * TX/RX queues.
+ * @achan: ACPM channel info.
+ * @chan_shmem: __iomem pointer to a channel described in shared memory.
+ */
+static void acpm_chan_shmem_get_params(struct acpm_chan *achan,
+ struct acpm_chan_shmem __iomem *chan_shmem)
+{
+ void __iomem *base = achan->acpm->sram_base;
+ struct acpm_queue *rx = &achan->rx;
+ struct acpm_queue *tx = &achan->tx;
+
+ achan->mlen = readl(&chan_shmem->mlen);
+ achan->poll_completion = readl(&chan_shmem->poll_completion);
+ achan->id = readl(&chan_shmem->id);
+ achan->qlen = readl(&chan_shmem->qlen);
+
+ tx->base = base + readl(&chan_shmem->rx_base);
+ tx->rear = base + readl(&chan_shmem->rx_rear);
+ tx->front = base + readl(&chan_shmem->rx_front);
+
+ rx->base = base + readl(&chan_shmem->tx_base);
+ rx->rear = base + readl(&chan_shmem->tx_rear);
+ rx->front = base + readl(&chan_shmem->tx_front);
+
+ dev_vdbg(achan->acpm->dev, "ID = %d poll = %d, mlen = %d, qlen = %d\n",
+ achan->id, achan->poll_completion, achan->mlen, achan->qlen);
+}
+
+/**
+ * acpm_achan_alloc_cmds() - allocate buffers for retrieving data from the ACPM
+ * firmware.
+ * @achan: ACPM channel info.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int acpm_achan_alloc_cmds(struct acpm_chan *achan)
+{
+ struct device *dev = achan->acpm->dev;
+ struct acpm_rx_data *rx_data;
+ size_t cmd_size, n_cmd;
+ int i;
+
+ if (achan->mlen == 0)
+ return 0;
+
+ cmd_size = sizeof(*(achan->rx_data[0].cmd));
+ n_cmd = DIV_ROUND_UP_ULL(achan->mlen, cmd_size);
+
+ for (i = 0; i < ACPM_SEQNUM_MAX; i++) {
+ rx_data = &achan->rx_data[i];
+ rx_data->n_cmd = n_cmd;
+ rx_data->cmd = devm_kcalloc(dev, n_cmd, cmd_size, GFP_KERNEL);
+ if (!rx_data->cmd)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/**
+ * acpm_free_mbox_chans() - free mailbox channels.
+ * @acpm: pointer to driver data.
+ */
+static void acpm_free_mbox_chans(struct acpm_info *acpm)
+{
+ int i;
+
+ for (i = 0; i < acpm->num_chans; i++)
+ if (!IS_ERR_OR_NULL(acpm->chans[i].chan))
+ mbox_free_channel(acpm->chans[i].chan);
+}
+
+/**
+ * acpm_channels_init() - initialize channels based on the configuration data in
+ * the shared memory.
+ * @acpm: pointer to driver data.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int acpm_channels_init(struct acpm_info *acpm)
+{
+ struct acpm_shmem __iomem *shmem = acpm->shmem;
+ struct acpm_chan_shmem __iomem *chans_shmem;
+ struct device *dev = acpm->dev;
+ int i, ret;
+
+ acpm->num_chans = readl(&shmem->num_chans);
+ acpm->chans = devm_kcalloc(dev, acpm->num_chans, sizeof(*acpm->chans),
+ GFP_KERNEL);
+ if (!acpm->chans)
+ return -ENOMEM;
+
+ chans_shmem = acpm->sram_base + readl(&shmem->chans);
+
+ for (i = 0; i < acpm->num_chans; i++) {
+ struct acpm_chan_shmem __iomem *chan_shmem = &chans_shmem[i];
+ struct acpm_chan *achan = &acpm->chans[i];
+ struct mbox_client *cl = &achan->cl;
+
+ achan->acpm = acpm;
+
+ acpm_chan_shmem_get_params(achan, chan_shmem);
+
+ ret = acpm_achan_alloc_cmds(achan);
+ if (ret)
+ return ret;
+
+ mutex_init(&achan->rx_lock);
+ mutex_init(&achan->tx_lock);
+
+ cl->dev = dev;
+
+ achan->chan = mbox_request_channel(cl, 0);
+ if (IS_ERR(achan->chan)) {
+ acpm_free_mbox_chans(acpm);
+ return PTR_ERR(achan->chan);
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * acpm_setup_ops() - setup the operations structures.
+ * @acpm: pointer to the driver data.
+ */
+static void acpm_setup_ops(struct acpm_info *acpm)
+{
+ struct acpm_pmic_ops *pmic_ops = &acpm->handle.ops.pmic_ops;
+
+ pmic_ops->read_reg = acpm_pmic_read_reg;
+ pmic_ops->bulk_read = acpm_pmic_bulk_read;
+ pmic_ops->write_reg = acpm_pmic_write_reg;
+ pmic_ops->bulk_write = acpm_pmic_bulk_write;
+ pmic_ops->update_reg = acpm_pmic_update_reg;
+}
+
+static int acpm_probe(struct platform_device *pdev)
+{
+ const struct acpm_match_data *match_data;
+ struct device *dev = &pdev->dev;
+ struct device_node *shmem;
+ struct acpm_info *acpm;
+ resource_size_t size;
+ struct resource res;
+ int ret;
+
+ acpm = devm_kzalloc(dev, sizeof(*acpm), GFP_KERNEL);
+ if (!acpm)
+ return -ENOMEM;
+
+ shmem = of_parse_phandle(dev->of_node, "shmem", 0);
+ ret = of_address_to_resource(shmem, 0, &res);
+ of_node_put(shmem);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to get shared memory.\n");
+
+ size = resource_size(&res);
+ acpm->sram_base = devm_ioremap(dev, res.start, size);
+ if (!acpm->sram_base)
+ return dev_err_probe(dev, -ENOMEM,
+ "Failed to ioremap shared memory.\n");
+
+ match_data = of_device_get_match_data(dev);
+ if (!match_data)
+ return dev_err_probe(dev, -EINVAL,
+ "Failed to get match data.\n");
+
+ acpm->shmem = acpm->sram_base + match_data->initdata_base;
+ acpm->dev = dev;
+
+ ret = acpm_channels_init(acpm);
+ if (ret)
+ return ret;
+
+ acpm_setup_ops(acpm);
+
+ platform_set_drvdata(pdev, acpm);
+
+ return 0;
+}
+
+/**
+ * acpm_handle_put() - release the handle acquired by acpm_get_by_phandle.
+ * @handle: Handle acquired by acpm_get_by_phandle.
+ */
+static void acpm_handle_put(const struct acpm_handle *handle)
+{
+ struct acpm_info *acpm = handle_to_acpm_info(handle);
+ struct device *dev = acpm->dev;
+
+ module_put(dev->driver->owner);
+ /* Drop reference taken with of_find_device_by_node(). */
+ put_device(dev);
+}
+
+/**
+ * devm_acpm_release() - devres release method.
+ * @dev: pointer to device.
+ * @res: pointer to resource.
+ */
+static void devm_acpm_release(struct device *dev, void *res)
+{
+ acpm_handle_put(*(struct acpm_handle **)res);
+}
+
+/**
+ * acpm_get_by_phandle() - get the ACPM handle using DT phandle.
+ * @dev: device pointer requesting ACPM handle.
+ * @property: property name containing phandle on ACPM node.
+ *
+ * Return: pointer to handle on success, ERR_PTR(-errno) otherwise.
+ */
+static const struct acpm_handle *acpm_get_by_phandle(struct device *dev,
+ const char *property)
+{
+ struct platform_device *pdev;
+ struct device_node *acpm_np;
+ struct device_link *link;
+ struct acpm_info *acpm;
+
+ acpm_np = of_parse_phandle(dev->of_node, property, 0);
+ if (!acpm_np)
+ return ERR_PTR(-ENODEV);
+
+ pdev = of_find_device_by_node(acpm_np);
+ if (!pdev) {
+ dev_err(dev, "Cannot find device node %s\n", acpm_np->name);
+ of_node_put(acpm_np);
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+
+ of_node_put(acpm_np);
+
+ acpm = platform_get_drvdata(pdev);
+ if (!acpm) {
+ dev_err(dev, "Cannot get drvdata from %s\n",
+ dev_name(&pdev->dev));
+ platform_device_put(pdev);
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+
+ if (!try_module_get(pdev->dev.driver->owner)) {
+ dev_err(dev, "Cannot get module reference.\n");
+ platform_device_put(pdev);
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+
+ link = device_link_add(dev, &pdev->dev, DL_FLAG_AUTOREMOVE_SUPPLIER);
+ if (!link) {
+ dev_err(&pdev->dev,
+ "Failed to create device link to consumer %s.\n",
+ dev_name(dev));
+ platform_device_put(pdev);
+ module_put(pdev->dev.driver->owner);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return &acpm->handle;
+}
+
+/**
+ * devm_acpm_get_by_phandle() - managed get handle using phandle.
+ * @dev: device pointer requesting ACPM handle.
+ * @property: property name containing phandle on ACPM node.
+ *
+ * Return: pointer to handle on success, ERR_PTR(-errno) otherwise.
+ */
+const struct acpm_handle *devm_acpm_get_by_phandle(struct device *dev,
+ const char *property)
+{
+ const struct acpm_handle **ptr, *handle;
+
+ ptr = devres_alloc(devm_acpm_release, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return ERR_PTR(-ENOMEM);
+
+ handle = acpm_get_by_phandle(dev, property);
+ if (!IS_ERR(handle)) {
+ *ptr = handle;
+ devres_add(dev, ptr);
+ } else {
+ devres_free(ptr);
+ }
+
+ return handle;
+}
+
+static const struct acpm_match_data acpm_gs101 = {
+ .initdata_base = ACPM_GS101_INITDATA_BASE,
+};
+
+static const struct of_device_id acpm_match[] = {
+ {
+ .compatible = "google,gs101-acpm-ipc",
+ .data = &acpm_gs101,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, acpm_match);
+
+static struct platform_driver acpm_driver = {
+ .probe = acpm_probe,
+ .driver = {
+ .name = "exynos-acpm-protocol",
+ .of_match_table = acpm_match,
+ },
+};
+module_platform_driver(acpm_driver);
+
+MODULE_AUTHOR("Tudor Ambarus <tudor.ambarus@linaro.org>");
+MODULE_DESCRIPTION("Samsung Exynos ACPM mailbox protocol driver");
+MODULE_LICENSE("GPL");
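
For context, a hypothetical consumer sketch (not an in-tree driver) of how a client could use this protocol driver: the handle is obtained with devm_acpm_get_by_phandle() and the PMIC accessors are reached through the ops that acpm_setup_ops() wires up to the acpm_pmic_*() helpers. The device-tree property name and the channel/type/register numbers are invented, and the ops layout is assumed to match the declarations in <linux/firmware/samsung/exynos-acpm-protocol.h> with the same signatures as those helpers:

#include <linux/firmware/samsung/exynos-acpm-protocol.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static int acpm_client_probe(struct platform_device *pdev)
{
	const struct acpm_handle *acpm;
	u8 val;
	int ret;

	/* "exynos,acpm-ipc" is a made-up phandle property name. */
	acpm = devm_acpm_get_by_phandle(&pdev->dev, "exynos,acpm-ipc");
	if (IS_ERR(acpm))
		return PTR_ERR(acpm);	/* may be -EPROBE_DEFER */

	/* Read register 0x23 of PMIC type 0x1, chip 0x0, over ACPM
	 * channel 2 (all values invented for the example). */
	ret = acpm->ops.pmic_ops.read_reg(acpm, 2, 0x1, 0x23, 0x0, &val);
	if (ret)
		return ret;

	dev_info(&pdev->dev, "PMIC reg 0x23 = %#x\n", val);
	return 0;
}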
diff --git a/drivers/firmware/samsung/exynos-acpm.h b/drivers/firmware/samsung/exynos-acpm.h
new file mode 100644
index 000000000000..2d14cb58f98c
--- /dev/null
+++ b/drivers/firmware/samsung/exynos-acpm.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2020 Samsung Electronics Co., Ltd.
+ * Copyright 2020 Google LLC.
+ * Copyright 2024 Linaro Ltd.
+ */
+#ifndef __EXYNOS_ACPM_H__
+#define __EXYNOS_ACPM_H__
+
+struct acpm_xfer {
+ const u32 *txd;
+ u32 *rxd;
+ size_t txlen;
+ size_t rxlen;
+ unsigned int acpm_chan_id;
+};
+
+struct acpm_handle;
+
+int acpm_do_xfer(const struct acpm_handle *handle,
+ const struct acpm_xfer *xfer);
+
+#endif /* __EXYNOS_ACPM_H__ */
diff --git a/drivers/firmware/smccc/kvm_guest.c b/drivers/firmware/smccc/kvm_guest.c
index f3319be20b36..a123c05cbc9e 100644
--- a/drivers/firmware/smccc/kvm_guest.c
+++ b/drivers/firmware/smccc/kvm_guest.c
@@ -6,8 +6,11 @@
#include <linux/bitmap.h>
#include <linux/cache.h>
#include <linux/kernel.h>
+#include <linux/memblock.h>
#include <linux/string.h>
+#include <uapi/linux/psci.h>
+
#include <asm/hypervisor.h>
static DECLARE_BITMAP(__kvm_arm_hyp_services, ARM_SMCCC_KVM_NUM_FUNCS) __ro_after_init = { };
@@ -51,3 +54,66 @@ bool kvm_arm_hyp_service_available(u32 func_id)
return test_bit(func_id, __kvm_arm_hyp_services);
}
EXPORT_SYMBOL_GPL(kvm_arm_hyp_service_available);
+
+#ifdef CONFIG_ARM64
+void __init kvm_arm_target_impl_cpu_init(void)
+{
+ int i;
+ u32 ver;
+ u64 max_cpus;
+ struct arm_smccc_res res;
+ struct target_impl_cpu *target;
+
+ if (!kvm_arm_hyp_service_available(ARM_SMCCC_KVM_FUNC_DISCOVER_IMPL_VER) ||
+ !kvm_arm_hyp_service_available(ARM_SMCCC_KVM_FUNC_DISCOVER_IMPL_CPUS))
+ return;
+
+ arm_smccc_1_1_invoke(ARM_SMCCC_VENDOR_HYP_KVM_DISCOVER_IMPL_VER_FUNC_ID,
+ 0, &res);
+ if (res.a0 != SMCCC_RET_SUCCESS)
+ return;
+
+ /* Version info is in lower 32 bits and is in SMMCCC_VERSION format */
+ ver = lower_32_bits(res.a1);
+ if (PSCI_VERSION_MAJOR(ver) != 1) {
+ pr_warn("Unsupported target CPU implementation version v%d.%d\n",
+ PSCI_VERSION_MAJOR(ver), PSCI_VERSION_MINOR(ver));
+ return;
+ }
+
+ if (!res.a2) {
+ pr_warn("No target implementation CPUs specified\n");
+ return;
+ }
+
+ max_cpus = res.a2;
+ target = memblock_alloc(sizeof(*target) * max_cpus, __alignof__(*target));
+ if (!target) {
+ pr_warn("Not enough memory for struct target_impl_cpu\n");
+ return;
+ }
+
+ for (i = 0; i < max_cpus; i++) {
+ arm_smccc_1_1_invoke(ARM_SMCCC_VENDOR_HYP_KVM_DISCOVER_IMPL_CPUS_FUNC_ID,
+ i, 0, 0, &res);
+ if (res.a0 != SMCCC_RET_SUCCESS) {
+ pr_warn("Discovering target implementation CPUs failed\n");
+ goto mem_free;
+ }
+ target[i].midr = res.a1;
+ target[i].revidr = res.a2;
+ target[i].aidr = res.a3;
+ }
+
+ if (!cpu_errata_set_target_impl(max_cpus, target)) {
+ pr_warn("Failed to set target implementation CPUs\n");
+ goto mem_free;
+ }
+
+ pr_info("Number of target implementation CPUs is %lld\n", max_cpus);
+ return;
+
+mem_free:
+ memblock_free(target, sizeof(*target) * max_cpus);
+}
+#endif
diff --git a/drivers/firmware/smccc/soc_id.c b/drivers/firmware/smccc/soc_id.c
index 1990263fbba0..c24b3fca1cfe 100644
--- a/drivers/firmware/smccc/soc_id.c
+++ b/drivers/firmware/smccc/soc_id.c
@@ -32,6 +32,85 @@
static struct soc_device *soc_dev;
static struct soc_device_attribute *soc_dev_attr;
+#ifdef CONFIG_ARM64
+
+static char __ro_after_init smccc_soc_id_name[136] = "";
+
+static inline void str_fragment_from_reg(char *dst, unsigned long reg)
+{
+ dst[0] = (reg >> 0) & 0xff;
+ dst[1] = (reg >> 8) & 0xff;
+ dst[2] = (reg >> 16) & 0xff;
+ dst[3] = (reg >> 24) & 0xff;
+ dst[4] = (reg >> 32) & 0xff;
+ dst[5] = (reg >> 40) & 0xff;
+ dst[6] = (reg >> 48) & 0xff;
+ dst[7] = (reg >> 56) & 0xff;
+}
+
+static char __init *smccc_soc_name_init(void)
+{
+ struct arm_smccc_1_2_regs args;
+ struct arm_smccc_1_2_regs res;
+ size_t len;
+
+ /*
+ * Issue Number 1.6 of the Arm SMC Calling Convention
+ * specification introduces an optional "name" string
+ * to the ARM_SMCCC_ARCH_SOC_ID function. Fetch it if
+ * available.
+ */
+ args.a0 = ARM_SMCCC_ARCH_SOC_ID;
+ args.a1 = 2; /* SOC_ID name */
+ arm_smccc_1_2_invoke(&args, &res);
+
+ if ((u32)res.a0 == 0) {
+ /*
+ * Copy res.a1..res.a17 to the smccc_soc_id_name string
+ * 8 bytes at a time. As per Issue 1.6 of the Arm SMC
+ * Calling Convention, the string will be NUL terminated
+ * and padded, from the end of the string to the end of the
+ * 136 byte buffer, with NULs.
+ */
+ str_fragment_from_reg(smccc_soc_id_name + 8 * 0, res.a1);
+ str_fragment_from_reg(smccc_soc_id_name + 8 * 1, res.a2);
+ str_fragment_from_reg(smccc_soc_id_name + 8 * 2, res.a3);
+ str_fragment_from_reg(smccc_soc_id_name + 8 * 3, res.a4);
+ str_fragment_from_reg(smccc_soc_id_name + 8 * 4, res.a5);
+ str_fragment_from_reg(smccc_soc_id_name + 8 * 5, res.a6);
+ str_fragment_from_reg(smccc_soc_id_name + 8 * 6, res.a7);
+ str_fragment_from_reg(smccc_soc_id_name + 8 * 7, res.a8);
+ str_fragment_from_reg(smccc_soc_id_name + 8 * 8, res.a9);
+ str_fragment_from_reg(smccc_soc_id_name + 8 * 9, res.a10);
+ str_fragment_from_reg(smccc_soc_id_name + 8 * 10, res.a11);
+ str_fragment_from_reg(smccc_soc_id_name + 8 * 11, res.a12);
+ str_fragment_from_reg(smccc_soc_id_name + 8 * 12, res.a13);
+ str_fragment_from_reg(smccc_soc_id_name + 8 * 13, res.a14);
+ str_fragment_from_reg(smccc_soc_id_name + 8 * 14, res.a15);
+ str_fragment_from_reg(smccc_soc_id_name + 8 * 15, res.a16);
+ str_fragment_from_reg(smccc_soc_id_name + 8 * 16, res.a17);
+
+ len = strnlen(smccc_soc_id_name, sizeof(smccc_soc_id_name));
+ if (len) {
+ if (len == sizeof(smccc_soc_id_name))
+ pr_warn(FW_BUG "Ignoring improperly formatted name\n");
+ else
+ return smccc_soc_id_name;
+ }
+ }
+
+ return NULL;
+}
+
+#else
+
+static char __init *smccc_soc_name_init(void)
+{
+ return NULL;
+}
+
+#endif
+
static int __init smccc_soc_init(void)
{
int soc_id_rev, soc_id_version;
@@ -72,6 +151,7 @@ static int __init smccc_soc_init(void)
soc_dev_attr->soc_id = soc_id_str;
soc_dev_attr->revision = soc_id_rev_str;
soc_dev_attr->family = soc_id_jep106_id_str;
+ soc_dev_attr->machine = smccc_soc_name_init();
soc_dev = soc_device_register(soc_dev_attr);
if (IS_ERR(soc_dev)) {
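
As an aside on the soc_id.c hunk above: the open-coded byte shuffling in str_fragment_from_reg() is simply a little-endian store of the register, since each SOC_ID name register carries eight string bytes with the first character in the least significant byte. A minimal kernel-context sketch, assuming put_unaligned_le64() from <linux/unaligned.h>, of an equivalent helper:

/* Sketch only, not part of the patch: a little-endian store produces the
 * same bytes as the per-byte shifts in str_fragment_from_reg() above.
 */
#include <linux/unaligned.h>

static void str_fragment_from_reg_le(char *dst, unsigned long reg)
{
	put_unaligned_le64(reg, dst);
}
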
diff --git a/drivers/firmware/stratix10-svc.c b/drivers/firmware/stratix10-svc.c
index 3c52cb73237a..e3f990d888d7 100644
--- a/drivers/firmware/stratix10-svc.c
+++ b/drivers/firmware/stratix10-svc.c
@@ -1224,22 +1224,28 @@ static int stratix10_svc_drv_probe(struct platform_device *pdev)
if (!svc->intel_svc_fcs) {
dev_err(dev, "failed to allocate %s device\n", INTEL_FCS);
ret = -ENOMEM;
- goto err_unregister_dev;
+ goto err_unregister_rsu_dev;
}
ret = platform_device_add(svc->intel_svc_fcs);
if (ret) {
platform_device_put(svc->intel_svc_fcs);
- goto err_unregister_dev;
+ goto err_unregister_rsu_dev;
}
+ ret = of_platform_default_populate(dev_of_node(dev), NULL, dev);
+ if (ret)
+ goto err_unregister_fcs_dev;
+
dev_set_drvdata(dev, svc);
pr_info("Intel Service Layer Driver Initialized\n");
return 0;
-err_unregister_dev:
+err_unregister_fcs_dev:
+ platform_device_unregister(svc->intel_svc_fcs);
+err_unregister_rsu_dev:
platform_device_unregister(svc->stratix10_svc_rsu);
err_free_kfifo:
kfifo_free(&controller->svc_fifo);
@@ -1253,6 +1259,8 @@ static void stratix10_svc_drv_remove(struct platform_device *pdev)
struct stratix10_svc *svc = dev_get_drvdata(&pdev->dev);
struct stratix10_svc_controller *ctrl = platform_get_drvdata(pdev);
+ of_platform_depopulate(ctrl->dev);
+
platform_device_unregister(svc->intel_svc_fcs);
platform_device_unregister(svc->stratix10_svc_rsu);
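
The stratix10 change above follows the usual pattern for drivers that create child platform devices from the device tree: of_platform_default_populate() in probe is balanced by of_platform_depopulate() in remove. A minimal sketch of that pairing, using hypothetical driver function names:

/* Hypothetical driver sketch (not part of this patch) showing the
 * of_platform_default_populate()/of_platform_depopulate() pairing.
 */
#include <linux/of_platform.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	/* Create platform devices for the child nodes of this device's DT node. */
	return of_platform_default_populate(pdev->dev.of_node, NULL, &pdev->dev);
}

static void example_remove(struct platform_device *pdev)
{
	/* Remove the child devices created in probe. */
	of_platform_depopulate(&pdev->dev);
}
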
diff --git a/drivers/firmware/thead,th1520-aon.c b/drivers/firmware/thead,th1520-aon.c
new file mode 100644
index 000000000000..38f812ac9920
--- /dev/null
+++ b/drivers/firmware/thead,th1520-aon.c
@@ -0,0 +1,250 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Alibaba Group Holding Limited.
+ * Copyright (c) 2024 Samsung Electronics Co., Ltd.
+ * Author: Michal Wilczynski <m.wilczynski@samsung.com>
+ */
+
+#include <linux/device.h>
+#include <linux/firmware/thead/thead,th1520-aon.h>
+#include <linux/mailbox_client.h>
+#include <linux/mailbox_controller.h>
+#include <linux/slab.h>
+
+#define MAX_RX_TIMEOUT (msecs_to_jiffies(3000))
+#define MAX_TX_TIMEOUT 500
+
+struct th1520_aon_chan {
+ struct mbox_chan *ch;
+ struct th1520_aon_rpc_ack_common ack_msg;
+ struct mbox_client cl;
+ struct completion done;
+
+ /* make sure only one RPC is performed at a time */
+ struct mutex transaction_lock;
+};
+
+struct th1520_aon_msg_req_set_resource_power_mode {
+ struct th1520_aon_rpc_msg_hdr hdr;
+ u16 resource;
+ u16 mode;
+ u16 reserved[10];
+} __packed __aligned(1);
+
+/*
+ * This type is used to indicate the error response for most functions.
+ */
+enum th1520_aon_error_codes {
+ LIGHT_AON_ERR_NONE = 0, /* Success */
+ LIGHT_AON_ERR_VERSION = 1, /* Incompatible API version */
+ LIGHT_AON_ERR_CONFIG = 2, /* Configuration error */
+ LIGHT_AON_ERR_PARM = 3, /* Bad parameter */
+ LIGHT_AON_ERR_NOACCESS = 4, /* Permission error (no access) */
+ LIGHT_AON_ERR_LOCKED = 5, /* Permission error (locked) */
+ LIGHT_AON_ERR_UNAVAILABLE = 6, /* Unavailable (out of resources) */
+ LIGHT_AON_ERR_NOTFOUND = 7, /* Not found */
+ LIGHT_AON_ERR_NOPOWER = 8, /* No power */
+ LIGHT_AON_ERR_IPC = 9, /* Generic IPC error */
+ LIGHT_AON_ERR_BUSY = 10, /* Resource is currently busy/active */
+ LIGHT_AON_ERR_FAIL = 11, /* General I/O failure */
+ LIGHT_AON_ERR_LAST
+};
+
+static int th1520_aon_linux_errmap[LIGHT_AON_ERR_LAST] = {
+ 0, /* LIGHT_AON_ERR_NONE */
+ -EINVAL, /* LIGHT_AON_ERR_VERSION */
+ -EINVAL, /* LIGHT_AON_ERR_CONFIG */
+ -EINVAL, /* LIGHT_AON_ERR_PARM */
+ -EACCES, /* LIGHT_AON_ERR_NOACCESS */
+ -EACCES, /* LIGHT_AON_ERR_LOCKED */
+ -ERANGE, /* LIGHT_AON_ERR_UNAVAILABLE */
+ -EEXIST, /* LIGHT_AON_ERR_NOTFOUND */
+ -EPERM, /* LIGHT_AON_ERR_NOPOWER */
+ -EPIPE, /* LIGHT_AON_ERR_IPC */
+ -EBUSY, /* LIGHT_AON_ERR_BUSY */
+ -EIO, /* LIGHT_AON_ERR_FAIL */
+};
+
+static inline int th1520_aon_to_linux_errno(int errno)
+{
+ if (errno >= LIGHT_AON_ERR_NONE && errno < LIGHT_AON_ERR_LAST)
+ return th1520_aon_linux_errmap[errno];
+
+ return -EIO;
+}
+
+static void th1520_aon_rx_callback(struct mbox_client *c, void *rx_msg)
+{
+ struct th1520_aon_chan *aon_chan =
+ container_of(c, struct th1520_aon_chan, cl);
+ struct th1520_aon_rpc_msg_hdr *hdr =
+ (struct th1520_aon_rpc_msg_hdr *)rx_msg;
+ u8 recv_size = sizeof(struct th1520_aon_rpc_msg_hdr) + hdr->size;
+
+ if (recv_size != sizeof(struct th1520_aon_rpc_ack_common)) {
+ dev_err(c->dev, "Invalid ack size, not completing\n");
+ return;
+ }
+
+ memcpy(&aon_chan->ack_msg, rx_msg, recv_size);
+ complete(&aon_chan->done);
+}
+
+/**
+ * th1520_aon_call_rpc() - Send an RPC request to the TH1520 AON subsystem
+ * @aon_chan: Pointer to the AON channel structure
+ * @msg: Pointer to the message (RPC payload) that will be sent
+ *
+ * This function sends an RPC message to the TH1520 AON subsystem via mailbox.
+ * It takes the provided @msg buffer, formats it with version and service flags,
+ * then blocks until the RPC completes or times out. The completion is signaled
+ * by the `aon_chan->done` completion, which is waited upon for a duration
+ * defined by `MAX_RX_TIMEOUT`.
+ *
+ * Return:
+ * * 0 on success
+ * * -ETIMEDOUT if the RPC call times out
+ * * A negative error code if the mailbox send fails or if AON responds with
+ * a non-zero error code (converted via th1520_aon_to_linux_errno()).
+ */
+int th1520_aon_call_rpc(struct th1520_aon_chan *aon_chan, void *msg)
+{
+ struct th1520_aon_rpc_msg_hdr *hdr = msg;
+ int ret;
+
+ mutex_lock(&aon_chan->transaction_lock);
+ reinit_completion(&aon_chan->done);
+
+ RPC_SET_VER(hdr, TH1520_AON_RPC_VERSION);
+ RPC_SET_SVC_ID(hdr, hdr->svc);
+ RPC_SET_SVC_FLAG_MSG_TYPE(hdr, RPC_SVC_MSG_TYPE_DATA);
+ RPC_SET_SVC_FLAG_ACK_TYPE(hdr, RPC_SVC_MSG_NEED_ACK);
+
+ ret = mbox_send_message(aon_chan->ch, msg);
+ if (ret < 0) {
+ dev_err(aon_chan->cl.dev, "RPC send msg failed: %d\n", ret);
+ goto out;
+ }
+
+ if (!wait_for_completion_timeout(&aon_chan->done, MAX_RX_TIMEOUT)) {
+ dev_err(aon_chan->cl.dev, "RPC send msg timeout\n");
+ mutex_unlock(&aon_chan->transaction_lock);
+ return -ETIMEDOUT;
+ }
+
+ ret = aon_chan->ack_msg.err_code;
+
+out:
+ mutex_unlock(&aon_chan->transaction_lock);
+
+ return th1520_aon_to_linux_errno(ret);
+}
+EXPORT_SYMBOL_GPL(th1520_aon_call_rpc);
+
+/**
+ * th1520_aon_power_update() - Change power state of a resource via TH1520 AON
+ * @aon_chan: Pointer to the AON channel structure
+ * @rsrc: Resource ID whose power state needs to be updated
+ * @power_on: Boolean indicating whether the resource should be powered on (true)
+ * or powered off (false)
+ *
+ * This function requests the TH1520 AON subsystem to set the power mode of the
+ * given resource (@rsrc) to either on or off. It constructs the message in
+ * `struct th1520_aon_msg_req_set_resource_power_mode` and then invokes
+ * th1520_aon_call_rpc() to make the request. If the AON call fails, an error
+ * message is logged along with the specific return code.
+ *
+ * Return:
+ * * 0 on success
+ * * A negative error code in case of failures (propagated from
+ * th1520_aon_call_rpc()).
+ */
+int th1520_aon_power_update(struct th1520_aon_chan *aon_chan, u16 rsrc,
+ bool power_on)
+{
+ struct th1520_aon_msg_req_set_resource_power_mode msg = {};
+ struct th1520_aon_rpc_msg_hdr *hdr = &msg.hdr;
+ int ret;
+
+ hdr->svc = TH1520_AON_RPC_SVC_PM;
+ hdr->func = TH1520_AON_PM_FUNC_SET_RESOURCE_POWER_MODE;
+ hdr->size = TH1520_AON_RPC_MSG_NUM;
+
+ RPC_SET_BE16(&msg.resource, 0, rsrc);
+ RPC_SET_BE16(&msg.resource, 2,
+ (power_on ? TH1520_AON_PM_PW_MODE_ON :
+ TH1520_AON_PM_PW_MODE_OFF));
+
+ ret = th1520_aon_call_rpc(aon_chan, &msg);
+ if (ret)
+		dev_err(aon_chan->cl.dev, "failed to power %s resource %d ret %d\n",
+			power_on ? "on" : "off", rsrc, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(th1520_aon_power_update);
+
+/**
+ * th1520_aon_init() - Initialize TH1520 AON firmware protocol interface
+ * @dev: Device pointer for the AON subsystem
+ *
+ * This function initializes the TH1520 AON firmware protocol interface by:
+ * - Allocating and initializing the AON channel structure
+ * - Setting up the mailbox client
+ * - Requesting the AON mailbox channel
+ * - Initializing synchronization primitives
+ *
+ * Return:
+ * * Valid pointer to th1520_aon_chan structure on success
+ * * ERR_PTR(-ENOMEM) if memory allocation fails
+ * * ERR_PTR() with other negative error codes from mailbox operations
+ */
+struct th1520_aon_chan *th1520_aon_init(struct device *dev)
+{
+ struct th1520_aon_chan *aon_chan;
+ struct mbox_client *cl;
+ int ret;
+
+ aon_chan = kzalloc(sizeof(*aon_chan), GFP_KERNEL);
+ if (!aon_chan)
+ return ERR_PTR(-ENOMEM);
+
+ cl = &aon_chan->cl;
+ cl->dev = dev;
+ cl->tx_block = true;
+ cl->tx_tout = MAX_TX_TIMEOUT;
+ cl->rx_callback = th1520_aon_rx_callback;
+
+ aon_chan->ch = mbox_request_channel_byname(cl, "aon");
+ if (IS_ERR(aon_chan->ch)) {
+ dev_err(dev, "Failed to request aon mbox chan\n");
+ ret = PTR_ERR(aon_chan->ch);
+ kfree(aon_chan);
+ return ERR_PTR(ret);
+ }
+
+ mutex_init(&aon_chan->transaction_lock);
+ init_completion(&aon_chan->done);
+
+ return aon_chan;
+}
+EXPORT_SYMBOL_GPL(th1520_aon_init);
+
+/**
+ * th1520_aon_deinit() - Clean up TH1520 AON firmware protocol interface
+ * @aon_chan: Pointer to the AON channel structure to clean up
+ *
+ * This function cleans up resources allocated by th1520_aon_init():
+ * - Frees the mailbox channel
+ * - Frees the AON channel
+ */
+void th1520_aon_deinit(struct th1520_aon_chan *aon_chan)
+{
+ mbox_free_channel(aon_chan->ch);
+ kfree(aon_chan);
+}
+EXPORT_SYMBOL_GPL(th1520_aon_deinit);
+
+MODULE_AUTHOR("Michal Wilczynski <m.wilczynski@samsung.com>");
+MODULE_DESCRIPTION("T-HEAD TH1520 Always-On firmware protocol library");
+MODULE_LICENSE("GPL");
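
For context, a minimal consumer sketch of the interface exported by the new driver above. The driver name and the resource ID are hypothetical placeholders; only the th1520_aon_*() calls and their signatures come from the patch itself:

/* Hypothetical consumer sketch for the TH1520 AON interface above;
 * EXAMPLE_AON_RSRC is a placeholder, not a real firmware resource ID.
 */
#include <linux/err.h>
#include <linux/firmware/thead/thead,th1520-aon.h>
#include <linux/platform_device.h>

#define EXAMPLE_AON_RSRC	0	/* placeholder resource ID */

static int example_consumer_probe(struct platform_device *pdev)
{
	struct th1520_aon_chan *aon;
	int ret;

	/* Requests the "aon" mailbox channel and sets up the RPC plumbing. */
	aon = th1520_aon_init(&pdev->dev);
	if (IS_ERR(aon))
		return PTR_ERR(aon);

	ret = th1520_aon_power_update(aon, EXAMPLE_AON_RSRC, true);
	if (ret) {
		th1520_aon_deinit(aon);
		return ret;
	}

	platform_set_drvdata(pdev, aon);
	return 0;
}

static void example_consumer_remove(struct platform_device *pdev)
{
	struct th1520_aon_chan *aon = platform_get_drvdata(pdev);

	th1520_aon_power_update(aon, EXAMPLE_AON_RSRC, false);
	th1520_aon_deinit(aon);
}
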
diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c
index 720fa8b5d8e9..7356e860e65c 100644
--- a/drivers/firmware/xilinx/zynqmp.c
+++ b/drivers/firmware/xilinx/zynqmp.c
@@ -1139,17 +1139,13 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_fpga_get_status);
int zynqmp_pm_fpga_get_config_status(u32 *value)
{
u32 ret_payload[PAYLOAD_ARG_CNT];
- u32 buf, lower_addr, upper_addr;
int ret;
if (!value)
return -EINVAL;
- lower_addr = lower_32_bits((u64)&buf);
- upper_addr = upper_32_bits((u64)&buf);
-
ret = zynqmp_pm_invoke_fn(PM_FPGA_READ, ret_payload, 4,
- XILINX_ZYNQMP_PM_FPGA_CONFIG_STAT_OFFSET, lower_addr, upper_addr,
+ XILINX_ZYNQMP_PM_FPGA_CONFIG_STAT_OFFSET, 0, 0,
XILINX_ZYNQMP_PM_FPGA_READ_CONFIG_REG);
*value = ret_payload[1];