Diffstat (limited to 'drivers')
-rw-r--r--  drivers/android/binderfs.c | 4
-rw-r--r--  drivers/bus/mhi/core/main.c | 3
-rw-r--r--  drivers/bus/mhi/core/pm.c | 143
-rw-r--r--  drivers/char/tlclk.c | 17
-rw-r--r--  drivers/clk/zynqmp/clk-gate-zynqmp.c | 9
-rw-r--r--  drivers/clk/zynqmp/clk-mux-zynqmp.c | 6
-rw-r--r--  drivers/clk/zynqmp/clkc.c | 17
-rw-r--r--  drivers/clk/zynqmp/divider.c | 12
-rw-r--r--  drivers/clk/zynqmp/pll.c | 29
-rw-r--r--  drivers/crypto/xilinx/zynqmp-aes-gcm.c | 22
-rw-r--r--  drivers/firmware/stratix10-rsu.c | 10
-rw-r--r--  drivers/firmware/stratix10-svc.c | 62
-rw-r--r--  drivers/firmware/xilinx/zynqmp-debug.c | 5
-rw-r--r--  drivers/firmware/xilinx/zynqmp.c | 603
-rw-r--r--  drivers/fpga/Kconfig | 2
-rw-r--r--  drivers/fpga/Makefile | 1
-rw-r--r--  drivers/fpga/dfl-fme-main.c | 4
-rw-r--r--  drivers/fpga/dfl-fme-perf.c | 1020
-rw-r--r--  drivers/fpga/dfl-fme.h | 2
-rw-r--r--  drivers/fpga/dfl.h | 2
-rw-r--r--  drivers/fpga/stratix10-soc.c | 25
-rw-r--r--  drivers/fpga/zynqmp-fpga.c | 14
-rw-r--r--  drivers/greybus/Kconfig | 6
-rw-r--r--  drivers/misc/cardreader/rts5261.c | 9
-rw-r--r--  drivers/misc/lkdtm/bugs.c | 2
-rw-r--r--  drivers/misc/mic/scif/scif_nodeqp.c | 2
-rw-r--r--  drivers/misc/sgi-xp/xpc_main.c | 10
-rw-r--r--  drivers/misc/sgi-xp/xpnet.c | 8
-rw-r--r--  drivers/misc/xilinx_sdfec.c | 11
-rw-r--r--  drivers/mmc/host/sdhci-of-arasan.c | 38
-rw-r--r--  drivers/nvmem/core.c | 52
-rw-r--r--  drivers/nvmem/zynqmp_nvmem.c | 11
-rw-r--r--  drivers/parport/daisy.c | 29
-rw-r--r--  drivers/parport/ieee1284.c | 94
-rw-r--r--  drivers/parport/ieee1284_ops.c | 70
-rw-r--r--  drivers/parport/parport_amiga.c | 22
-rw-r--r--  drivers/parport/parport_atari.c | 2
-rw-r--r--  drivers/parport/parport_cs.c | 6
-rw-r--r--  drivers/parport/parport_gsc.c | 25
-rw-r--r--  drivers/parport/parport_gsc.h | 21
-rw-r--r--  drivers/parport/parport_ip32.c | 117
-rw-r--r--  drivers/parport/parport_mfc3.c | 21
-rw-r--r--  drivers/parport/parport_pc.c | 263
-rw-r--r--  drivers/parport/parport_sunbpp.c | 2
-rw-r--r--  drivers/parport/probe.c | 34
-rw-r--r--  drivers/parport/procfs.c | 6
-rw-r--r--  drivers/parport/share.c | 292
-rw-r--r--  drivers/reset/reset-zynqmp.c | 26
-rw-r--r--  drivers/slimbus/qcom-ngd-ctrl.c | 5
-rw-r--r--  drivers/soc/xilinx/zynqmp_pm_domains.c | 26
-rw-r--r--  drivers/soc/xilinx/zynqmp_power.c | 17
-rw-r--r--  drivers/spi/spi-zynqmp-gqspi.c | 5
-rw-r--r--  drivers/uio/uio.c | 2
-rw-r--r--  drivers/uio/uio_dmem_genirq.c | 3
-rw-r--r--  drivers/visorbus/controlvmchannel.h | 2
-rw-r--r--  drivers/visorbus/vbuschannel.h | 2
-rw-r--r--  drivers/visorbus/visorbus_private.h | 2
57 files changed, 2195 insertions, 1060 deletions
diff --git a/drivers/android/binderfs.c b/drivers/android/binderfs.c
index 9ecad74183a3..7cf566aafe1f 100644
--- a/drivers/android/binderfs.c
+++ b/drivers/android/binderfs.c
@@ -650,7 +650,7 @@ static int binderfs_fill_super(struct super_block *sb, struct fs_context *fc)
struct binderfs_info *info;
struct binderfs_mount_opts *ctx = fc->fs_private;
struct inode *inode = NULL;
- struct binderfs_device device_info = { 0 };
+ struct binderfs_device device_info = {};
const char *name;
size_t len;
@@ -747,7 +747,7 @@ static const struct fs_context_operations binderfs_fs_context_ops = {
static int binderfs_init_fs_context(struct fs_context *fc)
{
- struct binderfs_mount_opts *ctx = fc->fs_private;
+ struct binderfs_mount_opts *ctx;
ctx = kzalloc(sizeof(struct binderfs_mount_opts), GFP_KERNEL);
if (!ctx)
diff --git a/drivers/bus/mhi/core/main.c b/drivers/bus/mhi/core/main.c
index 97e06cc586e4..c26eed06e5cc 100644
--- a/drivers/bus/mhi/core/main.c
+++ b/drivers/bus/mhi/core/main.c
@@ -258,7 +258,7 @@ int mhi_destroy_device(struct device *dev, void *data)
return 0;
}
-static void mhi_notify(struct mhi_device *mhi_dev, enum mhi_callback cb_reason)
+void mhi_notify(struct mhi_device *mhi_dev, enum mhi_callback cb_reason)
{
struct mhi_driver *mhi_drv;
@@ -270,6 +270,7 @@ static void mhi_notify(struct mhi_device *mhi_dev, enum mhi_callback cb_reason)
if (mhi_drv->status_cb)
mhi_drv->status_cb(mhi_dev, cb_reason);
}
+EXPORT_SYMBOL_GPL(mhi_notify);
/* Bind MHI channels to MHI devices */
void mhi_create_devices(struct mhi_controller *mhi_cntrl)
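mhi_notify() is now exported so code outside main.c (notably the suspend/resume paths added to pm.c below) can forward callbacks to client drivers. A minimal sketch of a client consuming those notifications through its status_cb hook — the driver name and the handling are illustrative, not part of this diff:

#include <linux/mhi.h>

/* Hypothetical MHI client reacting to the low-power-mode callbacks
 * that mhi_notify() delivers through the status_cb hook. */
static void my_client_status_cb(struct mhi_device *mhi_dev,
				enum mhi_callback cb_reason)
{
	switch (cb_reason) {
	case MHI_CB_LPM_ENTER:
		/* quiesce outstanding work before the device enters M3 */
		break;
	case MHI_CB_LPM_EXIT:
		/* resume normal operation */
		break;
	default:
		break;
	}
}

static struct mhi_driver my_client_driver = {
	.status_cb = my_client_status_cb,
	/* .probe, .remove, .id_table, .driver omitted */
};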
diff --git a/drivers/bus/mhi/core/pm.c b/drivers/bus/mhi/core/pm.c
index dc83d65f7784..e7c831867a23 100644
--- a/drivers/bus/mhi/core/pm.c
+++ b/drivers/bus/mhi/core/pm.c
@@ -669,6 +669,149 @@ void mhi_pm_st_worker(struct work_struct *work)
}
}
+int mhi_pm_suspend(struct mhi_controller *mhi_cntrl)
+{
+ struct mhi_chan *itr, *tmp;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ enum mhi_pm_state new_state;
+ int ret;
+
+ if (mhi_cntrl->pm_state == MHI_PM_DISABLE)
+ return -EINVAL;
+
+ if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
+ return -EIO;
+
+ /* Return busy if there are any pending resources */
+ if (atomic_read(&mhi_cntrl->dev_wake))
+ return -EBUSY;
+
+ /* Take MHI out of M2 state */
+ read_lock_bh(&mhi_cntrl->pm_lock);
+ mhi_cntrl->wake_get(mhi_cntrl, false);
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+
+ ret = wait_event_timeout(mhi_cntrl->state_event,
+ mhi_cntrl->dev_state == MHI_STATE_M0 ||
+ mhi_cntrl->dev_state == MHI_STATE_M1 ||
+ MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
+ msecs_to_jiffies(mhi_cntrl->timeout_ms));
+
+ read_lock_bh(&mhi_cntrl->pm_lock);
+ mhi_cntrl->wake_put(mhi_cntrl, false);
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+
+ if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
+ dev_err(dev,
+ "Could not enter M0/M1 state");
+ return -EIO;
+ }
+
+ write_lock_irq(&mhi_cntrl->pm_lock);
+
+ if (atomic_read(&mhi_cntrl->dev_wake)) {
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ return -EBUSY;
+ }
+
+ dev_info(dev, "Allowing M3 transition\n");
+ new_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3_ENTER);
+ if (new_state != MHI_PM_M3_ENTER) {
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ dev_err(dev,
+ "Error setting to PM state: %s from: %s\n",
+ to_mhi_pm_state_str(MHI_PM_M3_ENTER),
+ to_mhi_pm_state_str(mhi_cntrl->pm_state));
+ return -EIO;
+ }
+
+ /* Set MHI to M3 and wait for completion */
+ mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M3);
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ dev_info(dev, "Wait for M3 completion\n");
+
+ ret = wait_event_timeout(mhi_cntrl->state_event,
+ mhi_cntrl->dev_state == MHI_STATE_M3 ||
+ MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
+ msecs_to_jiffies(mhi_cntrl->timeout_ms));
+
+ if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
+ dev_err(dev,
+ "Did not enter M3 state, MHI state: %s, PM state: %s\n",
+ TO_MHI_STATE_STR(mhi_cntrl->dev_state),
+ to_mhi_pm_state_str(mhi_cntrl->pm_state));
+ return -EIO;
+ }
+
+ /* Notify clients about entering LPM */
+ list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) {
+ mutex_lock(&itr->mutex);
+ if (itr->mhi_dev)
+ mhi_notify(itr->mhi_dev, MHI_CB_LPM_ENTER);
+ mutex_unlock(&itr->mutex);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mhi_pm_suspend);
+
+int mhi_pm_resume(struct mhi_controller *mhi_cntrl)
+{
+ struct mhi_chan *itr, *tmp;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ enum mhi_pm_state cur_state;
+ int ret;
+
+ dev_info(dev, "Entered with PM state: %s, MHI state: %s\n",
+ to_mhi_pm_state_str(mhi_cntrl->pm_state),
+ TO_MHI_STATE_STR(mhi_cntrl->dev_state));
+
+ if (mhi_cntrl->pm_state == MHI_PM_DISABLE)
+ return 0;
+
+ if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
+ return -EIO;
+
+ /* Notify clients about exiting LPM */
+ list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) {
+ mutex_lock(&itr->mutex);
+ if (itr->mhi_dev)
+ mhi_notify(itr->mhi_dev, MHI_CB_LPM_EXIT);
+ mutex_unlock(&itr->mutex);
+ }
+
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3_EXIT);
+ if (cur_state != MHI_PM_M3_EXIT) {
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ dev_info(dev,
+ "Error setting to PM state: %s from: %s\n",
+ to_mhi_pm_state_str(MHI_PM_M3_EXIT),
+ to_mhi_pm_state_str(mhi_cntrl->pm_state));
+ return -EIO;
+ }
+
+ /* Set MHI to M0 and wait for completion */
+ mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M0);
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+
+ ret = wait_event_timeout(mhi_cntrl->state_event,
+ mhi_cntrl->dev_state == MHI_STATE_M0 ||
+ MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
+ msecs_to_jiffies(mhi_cntrl->timeout_ms));
+
+ if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
+ dev_err(dev,
+ "Did not enter M0 state, MHI state: %s, PM state: %s\n",
+ TO_MHI_STATE_STR(mhi_cntrl->dev_state),
+ to_mhi_pm_state_str(mhi_cntrl->pm_state));
+ return -EIO;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mhi_pm_resume);
+
int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl)
{
int ret;
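The two exports above are intended to be wired into a controller driver's power-management hooks. A minimal sketch, assuming a hypothetical controller driver that keeps its struct mhi_controller in drvdata; only the mhi_pm_suspend()/mhi_pm_resume() calls come from this patch:

#include <linux/device.h>
#include <linux/mhi.h>
#include <linux/pm.h>

static int my_cntrl_suspend(struct device *dev)
{
	struct mhi_controller *mhi_cntrl = dev_get_drvdata(dev);

	/* Moves the device to M3; returns -EBUSY while wake refs are held. */
	return mhi_pm_suspend(mhi_cntrl);
}

static int my_cntrl_resume(struct device *dev)
{
	struct mhi_controller *mhi_cntrl = dev_get_drvdata(dev);

	/* Brings the device back to M0 and sends MHI_CB_LPM_EXIT to clients. */
	return mhi_pm_resume(mhi_cntrl);
}

static SIMPLE_DEV_PM_OPS(my_cntrl_pm_ops, my_cntrl_suspend, my_cntrl_resume);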
diff --git a/drivers/char/tlclk.c b/drivers/char/tlclk.c
index 6d81bb3bb503..896a3550fba9 100644
--- a/drivers/char/tlclk.c
+++ b/drivers/char/tlclk.c
@@ -777,17 +777,21 @@ static int __init tlclk_init(void)
{
int ret;
+ telclk_interrupt = (inb(TLCLK_REG7) & 0x0f);
+
+ alarm_events = kzalloc( sizeof(struct tlclk_alarms), GFP_KERNEL);
+ if (!alarm_events) {
+ ret = -ENOMEM;
+ goto out1;
+ }
+
ret = register_chrdev(tlclk_major, "telco_clock", &tlclk_fops);
if (ret < 0) {
printk(KERN_ERR "tlclk: can't get major %d.\n", tlclk_major);
+ kfree(alarm_events);
return ret;
}
tlclk_major = ret;
- alarm_events = kzalloc( sizeof(struct tlclk_alarms), GFP_KERNEL);
- if (!alarm_events) {
- ret = -ENOMEM;
- goto out1;
- }
/* Read telecom clock IRQ number (Set by BIOS) */
if (!request_region(TLCLK_BASE, 8, "telco_clock")) {
@@ -796,7 +800,6 @@ static int __init tlclk_init(void)
ret = -EBUSY;
goto out2;
}
- telclk_interrupt = (inb(TLCLK_REG7) & 0x0f);
if (0x0F == telclk_interrupt ) { /* not MCPBL0010 ? */
printk(KERN_ERR "telclk_interrupt = 0x%x non-mcpbl0010 hw.\n",
@@ -837,8 +840,8 @@ out3:
release_region(TLCLK_BASE, 8);
out2:
kfree(alarm_events);
-out1:
unregister_chrdev(tlclk_major, "telco_clock");
+out1:
return ret;
}
diff --git a/drivers/clk/zynqmp/clk-gate-zynqmp.c b/drivers/clk/zynqmp/clk-gate-zynqmp.c
index 83b236f20fff..10c9b889324f 100644
--- a/drivers/clk/zynqmp/clk-gate-zynqmp.c
+++ b/drivers/clk/zynqmp/clk-gate-zynqmp.c
@@ -37,9 +37,8 @@ static int zynqmp_clk_gate_enable(struct clk_hw *hw)
const char *clk_name = clk_hw_get_name(hw);
u32 clk_id = gate->clk_id;
int ret;
- const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
- ret = eemi_ops->clock_enable(clk_id);
+ ret = zynqmp_pm_clock_enable(clk_id);
if (ret)
pr_warn_once("%s() clock enabled failed for %s, ret = %d\n",
@@ -58,9 +57,8 @@ static void zynqmp_clk_gate_disable(struct clk_hw *hw)
const char *clk_name = clk_hw_get_name(hw);
u32 clk_id = gate->clk_id;
int ret;
- const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
- ret = eemi_ops->clock_disable(clk_id);
+ ret = zynqmp_pm_clock_disable(clk_id);
if (ret)
pr_warn_once("%s() clock disable failed for %s, ret = %d\n",
@@ -79,9 +77,8 @@ static int zynqmp_clk_gate_is_enabled(struct clk_hw *hw)
const char *clk_name = clk_hw_get_name(hw);
u32 clk_id = gate->clk_id;
int state, ret;
- const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
- ret = eemi_ops->clock_getstate(clk_id, &state);
+ ret = zynqmp_pm_clock_getstate(clk_id, &state);
if (ret) {
pr_warn_once("%s() clock get state failed for %s, ret = %d\n",
__func__, clk_name, ret);
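This conversion, repeated across the zynqmp clock, crypto and platform drivers below, swaps the per-call EEMI ops lookup for a direct call into the now-exported firmware API. The shape of the change, reconstructed from the removed and added lines above:

/* before: fetch the ops table and call through it */
const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();

ret = eemi_ops->clock_enable(clk_id);

/* after: call the exported firmware helper directly */
ret = zynqmp_pm_clock_enable(clk_id);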
diff --git a/drivers/clk/zynqmp/clk-mux-zynqmp.c b/drivers/clk/zynqmp/clk-mux-zynqmp.c
index 0af8f74c5fa5..06194149be83 100644
--- a/drivers/clk/zynqmp/clk-mux-zynqmp.c
+++ b/drivers/clk/zynqmp/clk-mux-zynqmp.c
@@ -47,9 +47,8 @@ static u8 zynqmp_clk_mux_get_parent(struct clk_hw *hw)
u32 clk_id = mux->clk_id;
u32 val;
int ret;
- const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
- ret = eemi_ops->clock_getparent(clk_id, &val);
+ ret = zynqmp_pm_clock_getparent(clk_id, &val);
if (ret)
pr_warn_once("%s() getparent failed for clock: %s, ret = %d\n",
@@ -71,9 +70,8 @@ static int zynqmp_clk_mux_set_parent(struct clk_hw *hw, u8 index)
const char *clk_name = clk_hw_get_name(hw);
u32 clk_id = mux->clk_id;
int ret;
- const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
- ret = eemi_ops->clock_setparent(clk_id, index);
+ ret = zynqmp_pm_clock_setparent(clk_id, index);
if (ret)
pr_warn_once("%s() set parent failed for clock: %s, ret = %d\n",
diff --git a/drivers/clk/zynqmp/clkc.c b/drivers/clk/zynqmp/clkc.c
index 10e89f23880b..5eed5ce10179 100644
--- a/drivers/clk/zynqmp/clkc.c
+++ b/drivers/clk/zynqmp/clkc.c
@@ -134,7 +134,6 @@ static struct clk_hw *(* const clk_topology[]) (const char *name, u32 clk_id,
static struct zynqmp_clock *clock;
static struct clk_hw_onecell_data *zynqmp_data;
static unsigned int clock_max_idx;
-static const struct zynqmp_eemi_ops *eemi_ops;
/**
* zynqmp_is_valid_clock() - Check whether clock is valid or not
@@ -206,7 +205,7 @@ static int zynqmp_pm_clock_get_num_clocks(u32 *nclocks)
qdata.qid = PM_QID_CLOCK_GET_NUM_CLOCKS;
- ret = eemi_ops->query_data(qdata, ret_payload);
+ ret = zynqmp_pm_query_data(qdata, ret_payload);
*nclocks = ret_payload[1];
return ret;
@@ -231,7 +230,7 @@ static int zynqmp_pm_clock_get_name(u32 clock_id,
qdata.qid = PM_QID_CLOCK_GET_NAME;
qdata.arg1 = clock_id;
- eemi_ops->query_data(qdata, ret_payload);
+ zynqmp_pm_query_data(qdata, ret_payload);
memcpy(response, ret_payload, sizeof(*response));
return 0;
@@ -265,7 +264,7 @@ static int zynqmp_pm_clock_get_topology(u32 clock_id, u32 index,
qdata.arg1 = clock_id;
qdata.arg2 = index;
- ret = eemi_ops->query_data(qdata, ret_payload);
+ ret = zynqmp_pm_query_data(qdata, ret_payload);
memcpy(response, &ret_payload[1], sizeof(*response));
return ret;
@@ -296,7 +295,7 @@ struct clk_hw *zynqmp_clk_register_fixed_factor(const char *name, u32 clk_id,
qdata.qid = PM_QID_CLOCK_GET_FIXEDFACTOR_PARAMS;
qdata.arg1 = clk_id;
- ret = eemi_ops->query_data(qdata, ret_payload);
+ ret = zynqmp_pm_query_data(qdata, ret_payload);
if (ret)
return ERR_PTR(ret);
@@ -339,7 +338,7 @@ static int zynqmp_pm_clock_get_parents(u32 clock_id, u32 index,
qdata.arg1 = clock_id;
qdata.arg2 = index;
- ret = eemi_ops->query_data(qdata, ret_payload);
+ ret = zynqmp_pm_query_data(qdata, ret_payload);
memcpy(response, &ret_payload[1], sizeof(*response));
return ret;
@@ -364,7 +363,7 @@ static int zynqmp_pm_clock_get_attributes(u32 clock_id,
qdata.qid = PM_QID_CLOCK_GET_ATTRIBUTES;
qdata.arg1 = clock_id;
- ret = eemi_ops->query_data(qdata, ret_payload);
+ ret = zynqmp_pm_query_data(qdata, ret_payload);
memcpy(response, &ret_payload[1], sizeof(*response));
return ret;
@@ -738,10 +737,6 @@ static int zynqmp_clock_probe(struct platform_device *pdev)
int ret;
struct device *dev = &pdev->dev;
- eemi_ops = zynqmp_pm_get_eemi_ops();
- if (IS_ERR(eemi_ops))
- return PTR_ERR(eemi_ops);
-
ret = zynqmp_clk_setup(dev->of_node);
return ret;
diff --git a/drivers/clk/zynqmp/divider.c b/drivers/clk/zynqmp/divider.c
index 4be2cc76aa2e..8eed715707e3 100644
--- a/drivers/clk/zynqmp/divider.c
+++ b/drivers/clk/zynqmp/divider.c
@@ -83,9 +83,8 @@ static unsigned long zynqmp_clk_divider_recalc_rate(struct clk_hw *hw,
u32 div_type = divider->div_type;
u32 div, value;
int ret;
- const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
- ret = eemi_ops->clock_getdivider(clk_id, &div);
+ ret = zynqmp_pm_clock_getdivider(clk_id, &div);
if (ret)
pr_warn_once("%s() get divider failed for %s, ret = %d\n",
@@ -163,11 +162,10 @@ static long zynqmp_clk_divider_round_rate(struct clk_hw *hw,
u32 div_type = divider->div_type;
u32 bestdiv;
int ret;
- const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
/* if read only, just return current value */
if (divider->flags & CLK_DIVIDER_READ_ONLY) {
- ret = eemi_ops->clock_getdivider(clk_id, &bestdiv);
+ ret = zynqmp_pm_clock_getdivider(clk_id, &bestdiv);
if (ret)
pr_warn_once("%s() get divider failed for %s, ret = %d\n",
@@ -219,7 +217,6 @@ static int zynqmp_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
u32 div_type = divider->div_type;
u32 value, div;
int ret;
- const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
value = zynqmp_divider_get_val(parent_rate, rate, divider->flags);
if (div_type == TYPE_DIV1) {
@@ -233,7 +230,7 @@ static int zynqmp_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
div = __ffs(div);
- ret = eemi_ops->clock_setdivider(clk_id, div);
+ ret = zynqmp_pm_clock_setdivider(clk_id, div);
if (ret)
pr_warn_once("%s() set divider failed for %s, ret = %d\n",
@@ -258,7 +255,6 @@ static const struct clk_ops zynqmp_clk_divider_ops = {
*/
u32 zynqmp_clk_get_max_divisor(u32 clk_id, u32 type)
{
- const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
struct zynqmp_pm_query_data qdata = {0};
u32 ret_payload[PAYLOAD_ARG_CNT];
int ret;
@@ -266,7 +262,7 @@ u32 zynqmp_clk_get_max_divisor(u32 clk_id, u32 type)
qdata.qid = PM_QID_CLOCK_GET_MAX_DIVISOR;
qdata.arg1 = clk_id;
qdata.arg2 = type;
- ret = eemi_ops->query_data(qdata, ret_payload);
+ ret = zynqmp_pm_query_data(qdata, ret_payload);
/*
* To maintain backward compatibility return maximum possible value
* (0xFFFF) if query for max divisor is not successful.
diff --git a/drivers/clk/zynqmp/pll.c b/drivers/clk/zynqmp/pll.c
index 89b599530105..92f449ed38e5 100644
--- a/drivers/clk/zynqmp/pll.c
+++ b/drivers/clk/zynqmp/pll.c
@@ -50,10 +50,8 @@ static inline enum pll_mode zynqmp_pll_get_mode(struct clk_hw *hw)
const char *clk_name = clk_hw_get_name(hw);
u32 ret_payload[PAYLOAD_ARG_CNT];
int ret;
- const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
- ret = eemi_ops->ioctl(0, IOCTL_GET_PLL_FRAC_MODE, clk_id, 0,
- ret_payload);
+ ret = zynqmp_pm_get_pll_frac_mode(clk_id, ret_payload);
if (ret)
pr_warn_once("%s() PLL get frac mode failed for %s, ret = %d\n",
__func__, clk_name, ret);
@@ -73,14 +71,13 @@ static inline void zynqmp_pll_set_mode(struct clk_hw *hw, bool on)
const char *clk_name = clk_hw_get_name(hw);
int ret;
u32 mode;
- const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
if (on)
mode = PLL_MODE_FRAC;
else
mode = PLL_MODE_INT;
- ret = eemi_ops->ioctl(0, IOCTL_SET_PLL_FRAC_MODE, clk_id, mode, NULL);
+ ret = zynqmp_pm_set_pll_frac_mode(clk_id, mode);
if (ret)
pr_warn_once("%s() PLL set frac mode failed for %s, ret = %d\n",
__func__, clk_name, ret);
@@ -139,17 +136,15 @@ static unsigned long zynqmp_pll_recalc_rate(struct clk_hw *hw,
unsigned long rate, frac;
u32 ret_payload[PAYLOAD_ARG_CNT];
int ret;
- const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
- ret = eemi_ops->clock_getdivider(clk_id, &fbdiv);
+ ret = zynqmp_pm_clock_getdivider(clk_id, &fbdiv);
if (ret)
pr_warn_once("%s() get divider failed for %s, ret = %d\n",
__func__, clk_name, ret);
rate = parent_rate * fbdiv;
if (zynqmp_pll_get_mode(hw) == PLL_MODE_FRAC) {
- eemi_ops->ioctl(0, IOCTL_GET_PLL_FRAC_DATA, clk_id, 0,
- ret_payload);
+ zynqmp_pm_get_pll_frac_data(clk_id, ret_payload);
data = ret_payload[1];
frac = (parent_rate * data) / FRAC_DIV;
rate = rate + frac;
@@ -177,7 +172,6 @@ static int zynqmp_pll_set_rate(struct clk_hw *hw, unsigned long rate,
u32 fbdiv;
long rate_div, frac, m, f;
int ret;
- const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
if (zynqmp_pll_get_mode(hw) == PLL_MODE_FRAC) {
rate_div = (rate * FRAC_DIV) / parent_rate;
@@ -187,21 +181,21 @@ static int zynqmp_pll_set_rate(struct clk_hw *hw, unsigned long rate,
rate = parent_rate * m;
frac = (parent_rate * f) / FRAC_DIV;
- ret = eemi_ops->clock_setdivider(clk_id, m);
+ ret = zynqmp_pm_clock_setdivider(clk_id, m);
if (ret == -EUSERS)
WARN(1, "More than allowed devices are using the %s, which is forbidden\n",
clk_name);
else if (ret)
pr_warn_once("%s() set divider failed for %s, ret = %d\n",
__func__, clk_name, ret);
- eemi_ops->ioctl(0, IOCTL_SET_PLL_FRAC_DATA, clk_id, f, NULL);
+ zynqmp_pm_set_pll_frac_data(clk_id, f);
return rate + frac;
}
fbdiv = DIV_ROUND_CLOSEST(rate, parent_rate);
fbdiv = clamp_t(u32, fbdiv, PLL_FBDIV_MIN, PLL_FBDIV_MAX);
- ret = eemi_ops->clock_setdivider(clk_id, fbdiv);
+ ret = zynqmp_pm_clock_setdivider(clk_id, fbdiv);
if (ret)
pr_warn_once("%s() set divider failed for %s, ret = %d\n",
__func__, clk_name, ret);
@@ -222,9 +216,8 @@ static int zynqmp_pll_is_enabled(struct clk_hw *hw)
u32 clk_id = clk->clk_id;
unsigned int state;
int ret;
- const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
- ret = eemi_ops->clock_getstate(clk_id, &state);
+ ret = zynqmp_pm_clock_getstate(clk_id, &state);
if (ret) {
pr_warn_once("%s() clock get state failed for %s, ret = %d\n",
__func__, clk_name, ret);
@@ -246,12 +239,11 @@ static int zynqmp_pll_enable(struct clk_hw *hw)
const char *clk_name = clk_hw_get_name(hw);
u32 clk_id = clk->clk_id;
int ret;
- const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
if (zynqmp_pll_is_enabled(hw))
return 0;
- ret = eemi_ops->clock_enable(clk_id);
+ ret = zynqmp_pm_clock_enable(clk_id);
if (ret)
pr_warn_once("%s() clock enable failed for %s, ret = %d\n",
__func__, clk_name, ret);
@@ -269,12 +261,11 @@ static void zynqmp_pll_disable(struct clk_hw *hw)
const char *clk_name = clk_hw_get_name(hw);
u32 clk_id = clk->clk_id;
int ret;
- const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
if (!zynqmp_pll_is_enabled(hw))
return;
- ret = eemi_ops->clock_disable(clk_id);
+ ret = zynqmp_pm_clock_disable(clk_id);
if (ret)
pr_warn_once("%s() clock disable failed for %s, ret = %d\n",
__func__, clk_name, ret);
diff --git a/drivers/crypto/xilinx/zynqmp-aes-gcm.c b/drivers/crypto/xilinx/zynqmp-aes-gcm.c
index 09f7f468eef8..cd11558893cd 100644
--- a/drivers/crypto/xilinx/zynqmp-aes-gcm.c
+++ b/drivers/crypto/xilinx/zynqmp-aes-gcm.c
@@ -46,7 +46,6 @@ struct zynqmp_aead_drv_ctx {
} alg;
struct device *dev;
struct crypto_engine *engine;
- const struct zynqmp_eemi_ops *eemi_ops;
};
struct zynqmp_aead_hw_req {
@@ -80,21 +79,15 @@ static int zynqmp_aes_aead_cipher(struct aead_request *req)
struct zynqmp_aead_tfm_ctx *tfm_ctx = crypto_aead_ctx(aead);
struct zynqmp_aead_req_ctx *rq_ctx = aead_request_ctx(req);
struct device *dev = tfm_ctx->dev;
- struct aead_alg *alg = crypto_aead_alg(aead);
- struct zynqmp_aead_drv_ctx *drv_ctx;
struct zynqmp_aead_hw_req *hwreq;
dma_addr_t dma_addr_data, dma_addr_hw_req;
unsigned int data_size;
unsigned int status;
+ int ret;
size_t dma_size;
char *kbuf;
int err;
- drv_ctx = container_of(alg, struct zynqmp_aead_drv_ctx, alg.aead);
-
- if (!drv_ctx->eemi_ops->aes)
- return -ENOTSUPP;
-
if (tfm_ctx->keysrc == ZYNQMP_AES_KUP_KEY)
dma_size = req->cryptlen + ZYNQMP_AES_KEY_SIZE
+ GCM_AES_IV_SIZE;
@@ -136,9 +129,12 @@ static int zynqmp_aes_aead_cipher(struct aead_request *req)
hwreq->key = 0;
}
- drv_ctx->eemi_ops->aes(dma_addr_hw_req, &status);
+ ret = zynqmp_pm_aes_engine(dma_addr_hw_req, &status);
- if (status) {
+ if (ret) {
+ dev_err(dev, "ERROR: AES PM API failed\n");
+ err = ret;
+ } else if (status) {
switch (status) {
case ZYNQMP_AES_GCM_TAG_MISMATCH_ERR:
dev_err(dev, "ERROR: Gcm Tag mismatch\n");
@@ -388,12 +384,6 @@ static int zynqmp_aes_aead_probe(struct platform_device *pdev)
else
return -ENODEV;
- aes_drv_ctx.eemi_ops = zynqmp_pm_get_eemi_ops();
- if (IS_ERR(aes_drv_ctx.eemi_ops)) {
- dev_err(dev, "Failed to get ZynqMP EEMI interface\n");
- return PTR_ERR(aes_drv_ctx.eemi_ops);
- }
-
err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(ZYNQMP_DMA_BIT_MASK));
if (err < 0) {
dev_err(dev, "No usable DMA configuration\n");
diff --git a/drivers/firmware/stratix10-rsu.c b/drivers/firmware/stratix10-rsu.c
index f8533338b018..4379475c99ed 100644
--- a/drivers/firmware/stratix10-rsu.c
+++ b/drivers/firmware/stratix10-rsu.c
@@ -72,7 +72,7 @@ static void rsu_status_callback(struct stratix10_svc_client *client,
struct stratix10_rsu_priv *priv = client->priv;
struct arm_smccc_res *res = (struct arm_smccc_res *)data->kaddr1;
- if (data->status == BIT(SVC_STATUS_RSU_OK)) {
+ if (data->status == BIT(SVC_STATUS_OK)) {
priv->status.version = FIELD_GET(RSU_VERSION_MASK,
res->a2);
priv->status.state = FIELD_GET(RSU_STATE_MASK, res->a2);
@@ -108,9 +108,9 @@ static void rsu_command_callback(struct stratix10_svc_client *client,
{
struct stratix10_rsu_priv *priv = client->priv;
- if (data->status == BIT(SVC_STATUS_RSU_NO_SUPPORT))
+ if (data->status == BIT(SVC_STATUS_NO_SUPPORT))
dev_warn(client->dev, "Secure FW doesn't support notify\n");
- else if (data->status == BIT(SVC_STATUS_RSU_ERROR))
+ else if (data->status == BIT(SVC_STATUS_ERROR))
dev_err(client->dev, "Failure, returned status is %lu\n",
BIT(data->status));
@@ -133,9 +133,9 @@ static void rsu_retry_callback(struct stratix10_svc_client *client,
struct stratix10_rsu_priv *priv = client->priv;
unsigned int *counter = (unsigned int *)data->kaddr1;
- if (data->status == BIT(SVC_STATUS_RSU_OK))
+ if (data->status == BIT(SVC_STATUS_OK))
priv->retry_counter = *counter;
- else if (data->status == BIT(SVC_STATUS_RSU_NO_SUPPORT))
+ else if (data->status == BIT(SVC_STATUS_NO_SUPPORT))
dev_warn(client->dev, "Secure FW doesn't support retry\n");
else
dev_err(client->dev, "Failed to get retry counter %lu\n",
diff --git a/drivers/firmware/stratix10-svc.c b/drivers/firmware/stratix10-svc.c
index d5f0769f3761..e0db8dbfc9d1 100644
--- a/drivers/firmware/stratix10-svc.c
+++ b/drivers/firmware/stratix10-svc.c
@@ -214,7 +214,7 @@ static void svc_thread_cmd_data_claim(struct stratix10_svc_controller *ctrl,
complete(&ctrl->complete_status);
break;
}
- cb_data->status = BIT(SVC_STATUS_RECONFIG_BUFFER_DONE);
+ cb_data->status = BIT(SVC_STATUS_BUFFER_DONE);
cb_data->kaddr1 = svc_pa_to_va(res.a1);
cb_data->kaddr2 = (res.a2) ?
svc_pa_to_va(res.a2) : NULL;
@@ -227,7 +227,7 @@ static void svc_thread_cmd_data_claim(struct stratix10_svc_controller *ctrl,
__func__);
}
} while (res.a0 == INTEL_SIP_SMC_STATUS_OK ||
- res.a0 == INTEL_SIP_SMC_FPGA_CONFIG_STATUS_BUSY ||
+ res.a0 == INTEL_SIP_SMC_STATUS_BUSY ||
wait_for_completion_timeout(&ctrl->complete_status, timeout));
}
@@ -250,7 +250,7 @@ static void svc_thread_cmd_config_status(struct stratix10_svc_controller *ctrl,
cb_data->kaddr1 = NULL;
cb_data->kaddr2 = NULL;
cb_data->kaddr3 = NULL;
- cb_data->status = BIT(SVC_STATUS_RECONFIG_ERROR);
+ cb_data->status = BIT(SVC_STATUS_ERROR);
pr_debug("%s: polling config status\n", __func__);
@@ -259,7 +259,7 @@ static void svc_thread_cmd_config_status(struct stratix10_svc_controller *ctrl,
ctrl->invoke_fn(INTEL_SIP_SMC_FPGA_CONFIG_ISDONE,
0, 0, 0, 0, 0, 0, 0, &res);
if ((res.a0 == INTEL_SIP_SMC_STATUS_OK) ||
- (res.a0 == INTEL_SIP_SMC_FPGA_CONFIG_STATUS_ERROR))
+ (res.a0 == INTEL_SIP_SMC_STATUS_ERROR))
break;
/*
@@ -271,7 +271,7 @@ static void svc_thread_cmd_config_status(struct stratix10_svc_controller *ctrl,
}
if (res.a0 == INTEL_SIP_SMC_STATUS_OK && count_in_sec)
- cb_data->status = BIT(SVC_STATUS_RECONFIG_COMPLETED);
+ cb_data->status = BIT(SVC_STATUS_COMPLETED);
p_data->chan->scl->receive_cb(p_data->chan->scl, cb_data);
}
@@ -294,24 +294,18 @@ static void svc_thread_recv_status_ok(struct stratix10_svc_data *p_data,
switch (p_data->command) {
case COMMAND_RECONFIG:
- cb_data->status = BIT(SVC_STATUS_RECONFIG_REQUEST_OK);
+ case COMMAND_RSU_UPDATE:
+ case COMMAND_RSU_NOTIFY:
+ cb_data->status = BIT(SVC_STATUS_OK);
break;
case COMMAND_RECONFIG_DATA_SUBMIT:
- cb_data->status = BIT(SVC_STATUS_RECONFIG_BUFFER_SUBMITTED);
- break;
- case COMMAND_NOOP:
- cb_data->status = BIT(SVC_STATUS_RECONFIG_BUFFER_SUBMITTED);
- cb_data->kaddr1 = svc_pa_to_va(res.a1);
+ cb_data->status = BIT(SVC_STATUS_BUFFER_SUBMITTED);
break;
case COMMAND_RECONFIG_STATUS:
- cb_data->status = BIT(SVC_STATUS_RECONFIG_COMPLETED);
- break;
- case COMMAND_RSU_UPDATE:
- case COMMAND_RSU_NOTIFY:
- cb_data->status = BIT(SVC_STATUS_RSU_OK);
+ cb_data->status = BIT(SVC_STATUS_COMPLETED);
break;
case COMMAND_RSU_RETRY:
- cb_data->status = BIT(SVC_STATUS_RSU_OK);
+ cb_data->status = BIT(SVC_STATUS_OK);
cb_data->kaddr1 = &res.a1;
break;
default:
@@ -430,9 +424,9 @@ static int svc_normal_to_secure_thread(void *data)
if (pdata->command == COMMAND_RSU_STATUS) {
if (res.a0 == INTEL_SIP_SMC_RSU_ERROR)
- cbdata->status = BIT(SVC_STATUS_RSU_ERROR);
+ cbdata->status = BIT(SVC_STATUS_ERROR);
else
- cbdata->status = BIT(SVC_STATUS_RSU_OK);
+ cbdata->status = BIT(SVC_STATUS_OK);
cbdata->kaddr1 = &res;
cbdata->kaddr2 = NULL;
@@ -445,7 +439,7 @@ static int svc_normal_to_secure_thread(void *data)
case INTEL_SIP_SMC_STATUS_OK:
svc_thread_recv_status_ok(pdata, cbdata, res);
break;
- case INTEL_SIP_SMC_FPGA_CONFIG_STATUS_BUSY:
+ case INTEL_SIP_SMC_STATUS_BUSY:
switch (pdata->command) {
case COMMAND_RECONFIG_DATA_SUBMIT:
svc_thread_cmd_data_claim(ctrl,
@@ -460,33 +454,13 @@ static int svc_normal_to_secure_thread(void *data)
break;
}
break;
- case INTEL_SIP_SMC_FPGA_CONFIG_STATUS_REJECTED:
+ case INTEL_SIP_SMC_STATUS_REJECTED:
pr_debug("%s: STATUS_REJECTED\n", __func__);
break;
- case INTEL_SIP_SMC_FPGA_CONFIG_STATUS_ERROR:
+ case INTEL_SIP_SMC_STATUS_ERROR:
case INTEL_SIP_SMC_RSU_ERROR:
pr_err("%s: STATUS_ERROR\n", __func__);
- switch (pdata->command) {
- /* for FPGA mgr */
- case COMMAND_RECONFIG_DATA_CLAIM:
- case COMMAND_RECONFIG:
- case COMMAND_RECONFIG_DATA_SUBMIT:
- case COMMAND_RECONFIG_STATUS:
- cbdata->status =
- BIT(SVC_STATUS_RECONFIG_ERROR);
- break;
-
- /* for RSU */
- case COMMAND_RSU_STATUS:
- case COMMAND_RSU_UPDATE:
- case COMMAND_RSU_NOTIFY:
- case COMMAND_RSU_RETRY:
- cbdata->status =
- BIT(SVC_STATUS_RSU_ERROR);
- break;
- }
-
- cbdata->status = BIT(SVC_STATUS_RECONFIG_ERROR);
+ cbdata->status = BIT(SVC_STATUS_ERROR);
cbdata->kaddr1 = NULL;
cbdata->kaddr2 = NULL;
cbdata->kaddr3 = NULL;
@@ -502,7 +476,7 @@ static int svc_normal_to_secure_thread(void *data)
if ((pdata->command == COMMAND_RSU_RETRY) ||
(pdata->command == COMMAND_RSU_NOTIFY)) {
cbdata->status =
- BIT(SVC_STATUS_RSU_NO_SUPPORT);
+ BIT(SVC_STATUS_NO_SUPPORT);
cbdata->kaddr1 = NULL;
cbdata->kaddr2 = NULL;
cbdata->kaddr3 = NULL;
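With the reconfiguration- and RSU-specific values folded into one generic set, every stratix10-svc client now checks the same SVC_STATUS_* bits regardless of command type. A minimal sketch of a client receive callback under the new names — the callback itself is illustrative, only the status constants come from this series:

#include <linux/device.h>
#include <linux/firmware/intel/stratix10-svc-client.h>

static void my_svc_receive_cb(struct stratix10_svc_client *client,
			      struct stratix10_svc_cb_data *data)
{
	if (data->status == BIT(SVC_STATUS_OK))
		dev_dbg(client->dev, "command accepted\n");
	else if (data->status == BIT(SVC_STATUS_NO_SUPPORT))
		dev_warn(client->dev, "secure firmware lacks support\n");
	else if (data->status == BIT(SVC_STATUS_ERROR))
		dev_err(client->dev, "command failed\n");
}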
diff --git a/drivers/firmware/xilinx/zynqmp-debug.c b/drivers/firmware/xilinx/zynqmp-debug.c
index 43bc6cfdab45..99606b34975e 100644
--- a/drivers/firmware/xilinx/zynqmp-debug.c
+++ b/drivers/firmware/xilinx/zynqmp-debug.c
@@ -85,14 +85,13 @@ static int get_pm_api_id(char *pm_api_req, u32 *pm_id)
static int process_api_request(u32 pm_id, u64 *pm_api_arg, u32 *pm_api_ret)
{
- const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
u32 pm_api_version;
int ret;
struct zynqmp_pm_query_data qdata = {0};
switch (pm_id) {
case PM_GET_API_VERSION:
- ret = eemi_ops->get_api_version(&pm_api_version);
+ ret = zynqmp_pm_get_api_version(&pm_api_version);
sprintf(debugfs_buf, "PM-API Version = %d.%d\n",
pm_api_version >> 16, pm_api_version & 0xffff);
break;
@@ -102,7 +101,7 @@ static int process_api_request(u32 pm_id, u64 *pm_api_arg, u32 *pm_api_ret)
qdata.arg2 = pm_api_arg[2];
qdata.arg3 = pm_api_arg[3];
- ret = eemi_ops->query_data(qdata, pm_api_ret);
+ ret = zynqmp_pm_query_data(qdata, pm_api_ret);
if (ret)
break;
diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c
index 41b65164a367..8095fa84d5d7 100644
--- a/drivers/firmware/xilinx/zynqmp.c
+++ b/drivers/firmware/xilinx/zynqmp.c
@@ -2,7 +2,7 @@
/*
* Xilinx Zynq MPSoC Firmware layer
*
- * Copyright (C) 2014-2018 Xilinx, Inc.
+ * Copyright (C) 2014-2020 Xilinx, Inc.
*
* Michal Simek <michal.simek@xilinx.com>
* Davorin Mista <davorin.mista@aggios.com>
@@ -24,8 +24,6 @@
#include <linux/firmware/xlnx-zynqmp.h>
#include "zynqmp-debug.h"
-static const struct zynqmp_eemi_ops *eemi_ops_tbl;
-
static bool feature_check_enabled;
static u32 zynqmp_pm_features[PM_API_MAX];
@@ -219,7 +217,7 @@ static u32 pm_tz_version;
*
* Return: Returns status, either success or error+reason
*/
-static int zynqmp_pm_get_api_version(u32 *version)
+int zynqmp_pm_get_api_version(u32 *version)
{
u32 ret_payload[PAYLOAD_ARG_CNT];
int ret;
@@ -237,6 +235,7 @@ static int zynqmp_pm_get_api_version(u32 *version)
return ret;
}
+EXPORT_SYMBOL_GPL(zynqmp_pm_get_api_version);
/**
* zynqmp_pm_get_chipid - Get silicon ID registers
@@ -246,7 +245,7 @@ static int zynqmp_pm_get_api_version(u32 *version)
* Return: Returns the status of the operation and the idcode and version
* registers in @idcode and @version.
*/
-static int zynqmp_pm_get_chipid(u32 *idcode, u32 *version)
+int zynqmp_pm_get_chipid(u32 *idcode, u32 *version)
{
u32 ret_payload[PAYLOAD_ARG_CNT];
int ret;
@@ -260,6 +259,7 @@ static int zynqmp_pm_get_chipid(u32 *idcode, u32 *version)
return ret;
}
+EXPORT_SYMBOL_GPL(zynqmp_pm_get_chipid);
/**
* zynqmp_pm_get_trustzone_version() - Get secure trustzone firmware version
@@ -324,7 +324,7 @@ static int get_set_conduit_method(struct device_node *np)
*
* Return: Returns status, either success or error+reason
*/
-static int zynqmp_pm_query_data(struct zynqmp_pm_query_data qdata, u32 *out)
+int zynqmp_pm_query_data(struct zynqmp_pm_query_data qdata, u32 *out)
{
int ret;
@@ -338,6 +338,7 @@ static int zynqmp_pm_query_data(struct zynqmp_pm_query_data qdata, u32 *out)
*/
return qdata.qid == PM_QID_CLOCK_GET_NAME ? 0 : ret;
}
+EXPORT_SYMBOL_GPL(zynqmp_pm_query_data);
/**
* zynqmp_pm_clock_enable() - Enable the clock for given id
@@ -348,10 +349,11 @@ static int zynqmp_pm_query_data(struct zynqmp_pm_query_data qdata, u32 *out)
*
* Return: Returns status, either success or error+reason
*/
-static int zynqmp_pm_clock_enable(u32 clock_id)
+int zynqmp_pm_clock_enable(u32 clock_id)
{
return zynqmp_pm_invoke_fn(PM_CLOCK_ENABLE, clock_id, 0, 0, 0, NULL);
}
+EXPORT_SYMBOL_GPL(zynqmp_pm_clock_enable);
/**
* zynqmp_pm_clock_disable() - Disable the clock for given id
@@ -362,10 +364,11 @@ static int zynqmp_pm_clock_enable(u32 clock_id)
*
* Return: Returns status, either success or error+reason
*/
-static int zynqmp_pm_clock_disable(u32 clock_id)
+int zynqmp_pm_clock_disable(u32 clock_id)
{
return zynqmp_pm_invoke_fn(PM_CLOCK_DISABLE, clock_id, 0, 0, 0, NULL);
}
+EXPORT_SYMBOL_GPL(zynqmp_pm_clock_disable);
/**
* zynqmp_pm_clock_getstate() - Get the clock state for given id
@@ -377,7 +380,7 @@ static int zynqmp_pm_clock_disable(u32 clock_id)
*
* Return: Returns status, either success or error+reason
*/
-static int zynqmp_pm_clock_getstate(u32 clock_id, u32 *state)
+int zynqmp_pm_clock_getstate(u32 clock_id, u32 *state)
{
u32 ret_payload[PAYLOAD_ARG_CNT];
int ret;
@@ -388,6 +391,7 @@ static int zynqmp_pm_clock_getstate(u32 clock_id, u32 *state)
return ret;
}
+EXPORT_SYMBOL_GPL(zynqmp_pm_clock_getstate);
/**
* zynqmp_pm_clock_setdivider() - Set the clock divider for given id
@@ -399,11 +403,12 @@ static int zynqmp_pm_clock_getstate(u32 clock_id, u32 *state)
*
* Return: Returns status, either success or error+reason
*/
-static int zynqmp_pm_clock_setdivider(u32 clock_id, u32 divider)
+int zynqmp_pm_clock_setdivider(u32 clock_id, u32 divider)
{
return zynqmp_pm_invoke_fn(PM_CLOCK_SETDIVIDER, clock_id, divider,
0, 0, NULL);
}
+EXPORT_SYMBOL_GPL(zynqmp_pm_clock_setdivider);
/**
* zynqmp_pm_clock_getdivider() - Get the clock divider for given id
@@ -415,7 +420,7 @@ static int zynqmp_pm_clock_setdivider(u32 clock_id, u32 divider)
*
* Return: Returns status, either success or error+reason
*/
-static int zynqmp_pm_clock_getdivider(u32 clock_id, u32 *divider)
+int zynqmp_pm_clock_getdivider(u32 clock_id, u32 *divider)
{
u32 ret_payload[PAYLOAD_ARG_CNT];
int ret;
@@ -426,6 +431,7 @@ static int zynqmp_pm_clock_getdivider(u32 clock_id, u32 *divider)
return ret;
}
+EXPORT_SYMBOL_GPL(zynqmp_pm_clock_getdivider);
/**
* zynqmp_pm_clock_setrate() - Set the clock rate for given id
@@ -436,13 +442,14 @@ static int zynqmp_pm_clock_getdivider(u32 clock_id, u32 *divider)
*
* Return: Returns status, either success or error+reason
*/
-static int zynqmp_pm_clock_setrate(u32 clock_id, u64 rate)
+int zynqmp_pm_clock_setrate(u32 clock_id, u64 rate)
{
return zynqmp_pm_invoke_fn(PM_CLOCK_SETRATE, clock_id,
lower_32_bits(rate),
upper_32_bits(rate),
0, NULL);
}
+EXPORT_SYMBOL_GPL(zynqmp_pm_clock_setrate);
/**
* zynqmp_pm_clock_getrate() - Get the clock rate for given id
@@ -454,7 +461,7 @@ static int zynqmp_pm_clock_setrate(u32 clock_id, u64 rate)
*
* Return: Returns status, either success or error+reason
*/
-static int zynqmp_pm_clock_getrate(u32 clock_id, u64 *rate)
+int zynqmp_pm_clock_getrate(u32 clock_id, u64 *rate)
{
u32 ret_payload[PAYLOAD_ARG_CNT];
int ret;
@@ -465,6 +472,7 @@ static int zynqmp_pm_clock_getrate(u32 clock_id, u64 *rate)
return ret;
}
+EXPORT_SYMBOL_GPL(zynqmp_pm_clock_getrate);
/**
* zynqmp_pm_clock_setparent() - Set the clock parent for given id
@@ -475,11 +483,12 @@ static int zynqmp_pm_clock_getrate(u32 clock_id, u64 *rate)
*
* Return: Returns status, either success or error+reason
*/
-static int zynqmp_pm_clock_setparent(u32 clock_id, u32 parent_id)
+int zynqmp_pm_clock_setparent(u32 clock_id, u32 parent_id)
{
return zynqmp_pm_invoke_fn(PM_CLOCK_SETPARENT, clock_id,
parent_id, 0, 0, NULL);
}
+EXPORT_SYMBOL_GPL(zynqmp_pm_clock_setparent);
/**
* zynqmp_pm_clock_getparent() - Get the clock parent for given id
@@ -491,7 +500,7 @@ static int zynqmp_pm_clock_setparent(u32 clock_id, u32 parent_id)
*
* Return: Returns status, either success or error+reason
*/
-static int zynqmp_pm_clock_getparent(u32 clock_id, u32 *parent_id)
+int zynqmp_pm_clock_getparent(u32 clock_id, u32 *parent_id)
{
u32 ret_payload[PAYLOAD_ARG_CNT];
int ret;
@@ -502,48 +511,191 @@ static int zynqmp_pm_clock_getparent(u32 clock_id, u32 *parent_id)
return ret;
}
+EXPORT_SYMBOL_GPL(zynqmp_pm_clock_getparent);
/**
- * zynqmp_is_valid_ioctl() - Check whether IOCTL ID is valid or not
- * @ioctl_id: IOCTL ID
+ * zynqmp_pm_set_pll_frac_mode() - PM API for set PLL mode
+ *
+ * @clk_id: PLL clock ID
+ * @mode: PLL mode (PLL_MODE_FRAC/PLL_MODE_INT)
+ *
+ * This function sets PLL mode
*
- * Return: 1 if IOCTL is valid else 0
+ * Return: Returns status, either success or error+reason
*/
-static inline int zynqmp_is_valid_ioctl(u32 ioctl_id)
+int zynqmp_pm_set_pll_frac_mode(u32 clk_id, u32 mode)
{
- switch (ioctl_id) {
- case IOCTL_SD_DLL_RESET:
- case IOCTL_SET_SD_TAPDELAY:
- case IOCTL_SET_PLL_FRAC_MODE:
- case IOCTL_GET_PLL_FRAC_MODE:
- case IOCTL_SET_PLL_FRAC_DATA:
- case IOCTL_GET_PLL_FRAC_DATA:
- return 1;
- default:
- return 0;
- }
+ return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_SET_PLL_FRAC_MODE,
+ clk_id, mode, NULL);
}
+EXPORT_SYMBOL_GPL(zynqmp_pm_set_pll_frac_mode);
/**
- * zynqmp_pm_ioctl() - PM IOCTL API for device control and configs
- * @node_id: Node ID of the device
- * @ioctl_id: ID of the requested IOCTL
- * @arg1: Argument 1 to requested IOCTL call
- * @arg2: Argument 2 to requested IOCTL call
- * @out: Returned output value
+ * zynqmp_pm_get_pll_frac_mode() - PM API for get PLL mode
+ *
+ * @clk_id: PLL clock ID
+ * @mode: PLL mode
*
- * This function calls IOCTL to firmware for device control and configuration.
+ * This function returns the current PLL mode.
*
* Return: Returns status, either success or error+reason
*/
-static int zynqmp_pm_ioctl(u32 node_id, u32 ioctl_id, u32 arg1, u32 arg2,
- u32 *out)
+int zynqmp_pm_get_pll_frac_mode(u32 clk_id, u32 *mode)
{
- if (!zynqmp_is_valid_ioctl(ioctl_id))
- return -EINVAL;
+ return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_GET_PLL_FRAC_MODE,
+ clk_id, 0, mode);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_get_pll_frac_mode);
+
+/**
+ * zynqmp_pm_set_pll_frac_data() - PM API for setting pll fraction data
+ *
+ * @clk_id: PLL clock ID
+ * @data: fraction data
+ *
+ * This function sets fraction data.
+ * It is valid for fraction mode only.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_set_pll_frac_data(u32 clk_id, u32 data)
+{
+ return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_SET_PLL_FRAC_DATA,
+ clk_id, data, NULL);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_set_pll_frac_data);
+
+/**
+ * zynqmp_pm_get_pll_frac_data() - PM API for getting pll fraction data
+ *
+ * @clk_id: PLL clock ID
+ * @data: fraction data
+ *
+ * This function returns fraction data value.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_get_pll_frac_data(u32 clk_id, u32 *data)
+{
+ return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_GET_PLL_FRAC_DATA,
+ clk_id, 0, data);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_get_pll_frac_data);
+
+/**
+ * zynqmp_pm_set_sd_tapdelay() - Set tap delay for the SD device
+ *
+ * @node_id Node ID of the device
+ * @type Type of tap delay to set (input/output)
+ * @value Value to set for the tap delay
+ *
+ * This function sets input/output tap delay for the SD device.
+ *
+ * @return Returns status, either success or error+reason
+ */
+int zynqmp_pm_set_sd_tapdelay(u32 node_id, u32 type, u32 value)
+{
+ return zynqmp_pm_invoke_fn(PM_IOCTL, node_id, IOCTL_SET_SD_TAPDELAY,
+ type, value, NULL);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_set_sd_tapdelay);
+
+/**
+ * zynqmp_pm_sd_dll_reset() - Reset DLL logic
+ *
+ * @node_id Node ID of the device
+ * @type Reset type
+ *
+ * This function resets DLL logic for the SD device.
+ *
+ * @return Returns status, either success or error+reason
+ */
+int zynqmp_pm_sd_dll_reset(u32 node_id, u32 type)
+{
+ return zynqmp_pm_invoke_fn(PM_IOCTL, node_id, IOCTL_SD_DLL_RESET,
+ type, 0, NULL);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_sd_dll_reset);
- return zynqmp_pm_invoke_fn(PM_IOCTL, node_id, ioctl_id,
- arg1, arg2, out);
+/**
+ * zynqmp_pm_write_ggs() - PM API for writing global general storage (ggs)
+ * @index GGS register index
+ * @value Register value to be written
+ *
+ * This function writes value to GGS register.
+ *
+ * @return Returns status, either success or error+reason
+ */
+int zynqmp_pm_write_ggs(u32 index, u32 value)
+{
+ return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_WRITE_GGS,
+ index, value, NULL);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_write_ggs);
+
+/**
+ * zynqmp_pm_read_ggs() - PM API for reading global general storage (ggs)
+ * @index GGS register index
+ * @value Value read from GGS register
+ *
+ * This function returns GGS register value.
+ *
+ * @return Returns status, either success or error+reason
+ */
+int zynqmp_pm_read_ggs(u32 index, u32 *value)
+{
+ return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_READ_GGS,
+ index, 0, value);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_read_ggs);
+
+/**
+ * zynqmp_pm_write_pggs() - PM API for writing persistent global general
+ * storage (pggs)
+ * @index PGGS register index
+ * @value Register value to be written
+ *
+ * This function writes value to PGGS register.
+ *
+ * @return Returns status, either success or error+reason
+ */
+int zynqmp_pm_write_pggs(u32 index, u32 value)
+{
+ return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_WRITE_PGGS, index, value,
+ NULL);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_write_pggs);
+
+/**
+ * zynqmp_pm_read_pggs() - PM API for reading persistent global general
+ * storage (pggs)
+ * @index PGGS register index
+ * @value Value read from PGGS register
+ *
+ * This function returns PGGS register value.
+ *
+ * @return Returns status, either success or error+reason
+ */
+int zynqmp_pm_read_pggs(u32 index, u32 *value)
+{
+ return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_READ_PGGS, index, 0,
+ value);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_read_pggs);
+
+/**
+ * zynqmp_pm_set_boot_health_status() - PM API for setting healthy boot status
+ * @value Status value to be written
+ *
+ * This function sets healthy bit value to indicate boot health status
+ * to firmware.
+ *
+ * @return Returns status, either success or error+reason
+ */
+int zynqmp_pm_set_boot_health_status(u32 value)
+{
+ return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_SET_BOOT_HEALTH_STATUS,
+ value, 0, NULL);
}
/**
@@ -554,12 +706,13 @@ static int zynqmp_pm_ioctl(u32 node_id, u32 ioctl_id, u32 arg1, u32 arg2,
*
* Return: Returns status, either success or error+reason
*/
-static int zynqmp_pm_reset_assert(const enum zynqmp_pm_reset reset,
- const enum zynqmp_pm_reset_action assert_flag)
+int zynqmp_pm_reset_assert(const enum zynqmp_pm_reset reset,
+ const enum zynqmp_pm_reset_action assert_flag)
{
return zynqmp_pm_invoke_fn(PM_RESET_ASSERT, reset, assert_flag,
0, 0, NULL);
}
+EXPORT_SYMBOL_GPL(zynqmp_pm_reset_assert);
/**
* zynqmp_pm_reset_get_status - Get status of the reset
@@ -568,8 +721,7 @@ static int zynqmp_pm_reset_assert(const enum zynqmp_pm_reset reset,
*
* Return: Returns status, either success or error+reason
*/
-static int zynqmp_pm_reset_get_status(const enum zynqmp_pm_reset reset,
- u32 *status)
+int zynqmp_pm_reset_get_status(const enum zynqmp_pm_reset reset, u32 *status)
{
u32 ret_payload[PAYLOAD_ARG_CNT];
int ret;
@@ -583,6 +735,7 @@ static int zynqmp_pm_reset_get_status(const enum zynqmp_pm_reset reset,
return ret;
}
+EXPORT_SYMBOL_GPL(zynqmp_pm_reset_get_status);
/**
* zynqmp_pm_fpga_load - Perform the fpga load
@@ -597,12 +750,12 @@ static int zynqmp_pm_reset_get_status(const enum zynqmp_pm_reset reset,
*
* Return: Returns status, either success or error+reason
*/
-static int zynqmp_pm_fpga_load(const u64 address, const u32 size,
- const u32 flags)
+int zynqmp_pm_fpga_load(const u64 address, const u32 size, const u32 flags)
{
return zynqmp_pm_invoke_fn(PM_FPGA_LOAD, lower_32_bits(address),
upper_32_bits(address), size, flags, NULL);
}
+EXPORT_SYMBOL_GPL(zynqmp_pm_fpga_load);
/**
* zynqmp_pm_fpga_get_status - Read value from PCAP status register
@@ -613,7 +766,7 @@ static int zynqmp_pm_fpga_load(const u64 address, const u32 size,
*
* Return: Returns status, either success or error+reason
*/
-static int zynqmp_pm_fpga_get_status(u32 *value)
+int zynqmp_pm_fpga_get_status(u32 *value)
{
u32 ret_payload[PAYLOAD_ARG_CNT];
int ret;
@@ -626,6 +779,7 @@ static int zynqmp_pm_fpga_get_status(u32 *value)
return ret;
}
+EXPORT_SYMBOL_GPL(zynqmp_pm_fpga_get_status);
/**
* zynqmp_pm_init_finalize() - PM call to inform firmware that the caller
@@ -636,10 +790,11 @@ static int zynqmp_pm_fpga_get_status(u32 *value)
*
* Return: Returns status, either success or error+reason
*/
-static int zynqmp_pm_init_finalize(void)
+int zynqmp_pm_init_finalize(void)
{
return zynqmp_pm_invoke_fn(PM_PM_INIT_FINALIZE, 0, 0, 0, 0, NULL);
}
+EXPORT_SYMBOL_GPL(zynqmp_pm_init_finalize);
/**
* zynqmp_pm_set_suspend_mode() - Set system suspend mode
@@ -649,10 +804,11 @@ static int zynqmp_pm_init_finalize(void)
*
* Return: Returns status, either success or error+reason
*/
-static int zynqmp_pm_set_suspend_mode(u32 mode)
+int zynqmp_pm_set_suspend_mode(u32 mode)
{
return zynqmp_pm_invoke_fn(PM_SET_SUSPEND_MODE, mode, 0, 0, 0, NULL);
}
+EXPORT_SYMBOL_GPL(zynqmp_pm_set_suspend_mode);
/**
* zynqmp_pm_request_node() - Request a node with specific capabilities
@@ -666,13 +822,13 @@ static int zynqmp_pm_set_suspend_mode(u32 mode)
*
* Return: Returns status, either success or error+reason
*/
-static int zynqmp_pm_request_node(const u32 node, const u32 capabilities,
- const u32 qos,
- const enum zynqmp_pm_request_ack ack)
+int zynqmp_pm_request_node(const u32 node, const u32 capabilities,
+ const u32 qos, const enum zynqmp_pm_request_ack ack)
{
return zynqmp_pm_invoke_fn(PM_REQUEST_NODE, node, capabilities,
qos, ack, NULL);
}
+EXPORT_SYMBOL_GPL(zynqmp_pm_request_node);
/**
* zynqmp_pm_release_node() - Release a node
@@ -684,10 +840,11 @@ static int zynqmp_pm_request_node(const u32 node, const u32 capabilities,
*
* Return: Returns status, either success or error+reason
*/
-static int zynqmp_pm_release_node(const u32 node)
+int zynqmp_pm_release_node(const u32 node)
{
return zynqmp_pm_invoke_fn(PM_RELEASE_NODE, node, 0, 0, 0, NULL);
}
+EXPORT_SYMBOL_GPL(zynqmp_pm_release_node);
/**
* zynqmp_pm_set_requirement() - PM call to set requirement for PM slaves
@@ -701,13 +858,14 @@ static int zynqmp_pm_release_node(const u32 node)
*
* Return: Returns status, either success or error+reason
*/
-static int zynqmp_pm_set_requirement(const u32 node, const u32 capabilities,
- const u32 qos,
- const enum zynqmp_pm_request_ack ack)
+int zynqmp_pm_set_requirement(const u32 node, const u32 capabilities,
+ const u32 qos,
+ const enum zynqmp_pm_request_ack ack)
{
return zynqmp_pm_invoke_fn(PM_SET_REQUIREMENT, node, capabilities,
qos, ack, NULL);
}
+EXPORT_SYMBOL_GPL(zynqmp_pm_set_requirement);
/**
* zynqmp_pm_aes - Access AES hardware to encrypt/decrypt the data using
@@ -717,7 +875,7 @@ static int zynqmp_pm_set_requirement(const u32 node, const u32 capabilities,
*
* Return: Returns status, either success or error code.
*/
-static int zynqmp_pm_aes_engine(const u64 address, u32 *out)
+int zynqmp_pm_aes_engine(const u64 address, u32 *out)
{
u32 ret_payload[PAYLOAD_ARG_CNT];
int ret;
@@ -732,47 +890,304 @@ static int zynqmp_pm_aes_engine(const u64 address, u32 *out)
return ret;
}
+EXPORT_SYMBOL_GPL(zynqmp_pm_aes_engine);
+
+/**
+ * zynqmp_pm_system_shutdown - PM call to request a system shutdown or restart
+ * @type: Shutdown or restart? 0 for shutdown, 1 for restart
+ * @subtype: Specifies which system should be restarted or shut down
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_system_shutdown(const u32 type, const u32 subtype)
+{
+ return zynqmp_pm_invoke_fn(PM_SYSTEM_SHUTDOWN, type, subtype,
+ 0, 0, NULL);
+}
+
+/**
+ * struct zynqmp_pm_shutdown_scope - Struct for shutdown scope
+ * @subtype: Shutdown subtype
+ * @name: Matching string for scope argument
+ *
+ * This struct encapsulates mapping between shutdown scope ID and string.
+ */
+struct zynqmp_pm_shutdown_scope {
+ const enum zynqmp_pm_shutdown_subtype subtype;
+ const char *name;
+};
-static const struct zynqmp_eemi_ops eemi_ops = {
- .get_api_version = zynqmp_pm_get_api_version,
- .get_chipid = zynqmp_pm_get_chipid,
- .query_data = zynqmp_pm_query_data,
- .clock_enable = zynqmp_pm_clock_enable,
- .clock_disable = zynqmp_pm_clock_disable,
- .clock_getstate = zynqmp_pm_clock_getstate,
- .clock_setdivider = zynqmp_pm_clock_setdivider,
- .clock_getdivider = zynqmp_pm_clock_getdivider,
- .clock_setrate = zynqmp_pm_clock_setrate,
- .clock_getrate = zynqmp_pm_clock_getrate,
- .clock_setparent = zynqmp_pm_clock_setparent,
- .clock_getparent = zynqmp_pm_clock_getparent,
- .ioctl = zynqmp_pm_ioctl,
- .reset_assert = zynqmp_pm_reset_assert,
- .reset_get_status = zynqmp_pm_reset_get_status,
- .init_finalize = zynqmp_pm_init_finalize,
- .set_suspend_mode = zynqmp_pm_set_suspend_mode,
- .request_node = zynqmp_pm_request_node,
- .release_node = zynqmp_pm_release_node,
- .set_requirement = zynqmp_pm_set_requirement,
- .fpga_load = zynqmp_pm_fpga_load,
- .fpga_get_status = zynqmp_pm_fpga_get_status,
- .aes = zynqmp_pm_aes_engine,
+static struct zynqmp_pm_shutdown_scope shutdown_scopes[] = {
+ [ZYNQMP_PM_SHUTDOWN_SUBTYPE_SUBSYSTEM] = {
+ .subtype = ZYNQMP_PM_SHUTDOWN_SUBTYPE_SUBSYSTEM,
+ .name = "subsystem",
+ },
+ [ZYNQMP_PM_SHUTDOWN_SUBTYPE_PS_ONLY] = {
+ .subtype = ZYNQMP_PM_SHUTDOWN_SUBTYPE_PS_ONLY,
+ .name = "ps_only",
+ },
+ [ZYNQMP_PM_SHUTDOWN_SUBTYPE_SYSTEM] = {
+ .subtype = ZYNQMP_PM_SHUTDOWN_SUBTYPE_SYSTEM,
+ .name = "system",
+ },
};
+static struct zynqmp_pm_shutdown_scope *selected_scope =
+ &shutdown_scopes[ZYNQMP_PM_SHUTDOWN_SUBTYPE_SYSTEM];
+
/**
- * zynqmp_pm_get_eemi_ops - Get eemi ops functions
+ * zynqmp_pm_is_shutdown_scope_valid - Check if shutdown scope string is valid
+ * @scope_string: Shutdown scope string
*
- * Return: Pointer of eemi_ops structure
+ * Return: Return pointer to matching shutdown scope struct from
+ * array of available options in system if string is valid,
+ * otherwise returns NULL.
*/
-const struct zynqmp_eemi_ops *zynqmp_pm_get_eemi_ops(void)
+static struct zynqmp_pm_shutdown_scope*
+ zynqmp_pm_is_shutdown_scope_valid(const char *scope_string)
+{
+ int count;
+
+ for (count = 0; count < ARRAY_SIZE(shutdown_scopes); count++)
+ if (sysfs_streq(scope_string, shutdown_scopes[count].name))
+ return &shutdown_scopes[count];
+
+ return NULL;
+}
+
+static ssize_t shutdown_scope_show(struct device *device,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(shutdown_scopes); i++) {
+ if (&shutdown_scopes[i] == selected_scope) {
+ strcat(buf, "[");
+ strcat(buf, shutdown_scopes[i].name);
+ strcat(buf, "]");
+ } else {
+ strcat(buf, shutdown_scopes[i].name);
+ }
+ strcat(buf, " ");
+ }
+ strcat(buf, "\n");
+
+ return strlen(buf);
+}
+
+static ssize_t shutdown_scope_store(struct device *device,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int ret;
+ struct zynqmp_pm_shutdown_scope *scope;
+
+ scope = zynqmp_pm_is_shutdown_scope_valid(buf);
+ if (!scope)
+ return -EINVAL;
+
+ ret = zynqmp_pm_system_shutdown(ZYNQMP_PM_SHUTDOWN_TYPE_SETSCOPE_ONLY,
+ scope->subtype);
+ if (ret) {
+ pr_err("unable to set shutdown scope %s\n", buf);
+ return ret;
+ }
+
+ selected_scope = scope;
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(shutdown_scope);
+
+static ssize_t health_status_store(struct device *device,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int ret;
+ unsigned int value;
+
+ ret = kstrtouint(buf, 10, &value);
+ if (ret)
+ return ret;
+
+ ret = zynqmp_pm_set_boot_health_status(value);
+ if (ret) {
+ dev_err(device, "unable to set healthy bit value to %u\n",
+ value);
+ return ret;
+ }
+
+ return count;
+}
+
+static DEVICE_ATTR_WO(health_status);
+
+static ssize_t ggs_show(struct device *device,
+ struct device_attribute *attr,
+ char *buf,
+ u32 reg)
+{
+ int ret;
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+
+ ret = zynqmp_pm_read_ggs(reg, ret_payload);
+ if (ret)
+ return ret;
+
+ return sprintf(buf, "0x%x\n", ret_payload[1]);
+}
+
+static ssize_t ggs_store(struct device *device,
+ struct device_attribute *attr,
+ const char *buf, size_t count,
+ u32 reg)
{
- if (eemi_ops_tbl)
- return eemi_ops_tbl;
- else
- return ERR_PTR(-EPROBE_DEFER);
+ long value;
+ int ret;
+
+ if (reg >= GSS_NUM_REGS)
+ return -EINVAL;
+
+ ret = kstrtol(buf, 16, &value);
+ if (ret) {
+ count = -EFAULT;
+ goto err;
+ }
+ ret = zynqmp_pm_write_ggs(reg, value);
+ if (ret)
+ count = -EFAULT;
+err:
+ return count;
}
-EXPORT_SYMBOL_GPL(zynqmp_pm_get_eemi_ops);
+
+/* GGS register show functions */
+#define GGS0_SHOW(N) \
+ ssize_t ggs##N##_show(struct device *device, \
+ struct device_attribute *attr, \
+ char *buf) \
+ { \
+ return ggs_show(device, attr, buf, N); \
+ }
+
+static GGS0_SHOW(0);
+static GGS0_SHOW(1);
+static GGS0_SHOW(2);
+static GGS0_SHOW(3);
+
+/* GGS register store function */
+#define GGS0_STORE(N) \
+ ssize_t ggs##N##_store(struct device *device, \
+ struct device_attribute *attr, \
+ const char *buf, \
+ size_t count) \
+ { \
+ return ggs_store(device, attr, buf, count, N); \
+ }
+
+static GGS0_STORE(0);
+static GGS0_STORE(1);
+static GGS0_STORE(2);
+static GGS0_STORE(3);
+
+static ssize_t pggs_show(struct device *device,
+ struct device_attribute *attr,
+ char *buf,
+ u32 reg)
+{
+ int ret;
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+
+ ret = zynqmp_pm_read_pggs(reg, ret_payload);
+ if (ret)
+ return ret;
+
+ return sprintf(buf, "0x%x\n", ret_payload[1]);
+}
+
+static ssize_t pggs_store(struct device *device,
+ struct device_attribute *attr,
+ const char *buf, size_t count,
+ u32 reg)
+{
+ long value;
+ int ret;
+
+ if (reg >= GSS_NUM_REGS)
+ return -EINVAL;
+
+ ret = kstrtol(buf, 16, &value);
+ if (ret) {
+ count = -EFAULT;
+ goto err;
+ }
+
+ ret = zynqmp_pm_write_pggs(reg, value);
+ if (ret)
+ count = -EFAULT;
+
+err:
+ return count;
+}
+
+#define PGGS0_SHOW(N) \
+ ssize_t pggs##N##_show(struct device *device, \
+ struct device_attribute *attr, \
+ char *buf) \
+ { \
+ return pggs_show(device, attr, buf, N); \
+ }
+
+#define PGGS0_STORE(N) \
+ ssize_t pggs##N##_store(struct device *device, \
+ struct device_attribute *attr, \
+ const char *buf, \
+ size_t count) \
+ { \
+ return pggs_store(device, attr, buf, count, N); \
+ }
+
+/* PGGS register show functions */
+static PGGS0_SHOW(0);
+static PGGS0_SHOW(1);
+static PGGS0_SHOW(2);
+static PGGS0_SHOW(3);
+
+/* PGGS register store functions */
+static PGGS0_STORE(0);
+static PGGS0_STORE(1);
+static PGGS0_STORE(2);
+static PGGS0_STORE(3);
+
+/* GGS register attributes */
+static DEVICE_ATTR_RW(ggs0);
+static DEVICE_ATTR_RW(ggs1);
+static DEVICE_ATTR_RW(ggs2);
+static DEVICE_ATTR_RW(ggs3);
+
+/* PGGS register attributes */
+static DEVICE_ATTR_RW(pggs0);
+static DEVICE_ATTR_RW(pggs1);
+static DEVICE_ATTR_RW(pggs2);
+static DEVICE_ATTR_RW(pggs3);
+
+static struct attribute *zynqmp_firmware_attrs[] = {
+ &dev_attr_ggs0.attr,
+ &dev_attr_ggs1.attr,
+ &dev_attr_ggs2.attr,
+ &dev_attr_ggs3.attr,
+ &dev_attr_pggs0.attr,
+ &dev_attr_pggs1.attr,
+ &dev_attr_pggs2.attr,
+ &dev_attr_pggs3.attr,
+ &dev_attr_shutdown_scope.attr,
+ &dev_attr_health_status.attr,
+ NULL,
+};
+
+ATTRIBUTE_GROUPS(zynqmp_firmware);
static int zynqmp_firmware_probe(struct platform_device *pdev)
{
@@ -820,9 +1235,6 @@ static int zynqmp_firmware_probe(struct platform_device *pdev)
pr_info("%s Trustzone version v%d.%d\n", __func__,
pm_tz_version >> 16, pm_tz_version & 0xFFFF);
- /* Assign eemi_ops_table */
- eemi_ops_tbl = &eemi_ops;
-
zynqmp_pm_api_debugfs_init();
ret = mfd_add_devices(&pdev->dev, PLATFORM_DEVID_NONE, firmware_devs,
@@ -854,6 +1266,7 @@ static struct platform_driver zynqmp_firmware_driver = {
.driver = {
.name = "zynqmp_firmware",
.of_match_table = zynqmp_firmware_of_match,
+ .dev_groups = zynqmp_firmware_groups,
},
.probe = zynqmp_firmware_probe,
.remove = zynqmp_firmware_remove,
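Besides the new sysfs attributes, exporting these helpers lets leaf drivers (the sdhci-of-arasan change in this same series, for instance) call the firmware directly instead of fetching the EEMI ops table at probe time. A minimal sketch of such a consumer — the node ID, tap-delay type and values are placeholders, only the function signatures come from this patch:

#include <linux/firmware/xlnx-zynqmp.h>

/* Hypothetical consumer: program an SD tap delay, then reset its DLL. */
static int my_sd_retune(u32 node_id)
{
	int ret;

	ret = zynqmp_pm_set_sd_tapdelay(node_id, 0 /* type */, 0x8 /* value */);
	if (ret)
		return ret;

	return zynqmp_pm_sd_dll_reset(node_id, 0 /* type */);
}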
diff --git a/drivers/fpga/Kconfig b/drivers/fpga/Kconfig
index 72380e1d31c7..b2408a710662 100644
--- a/drivers/fpga/Kconfig
+++ b/drivers/fpga/Kconfig
@@ -156,7 +156,7 @@ config FPGA_DFL
config FPGA_DFL_FME
tristate "FPGA DFL FME Driver"
- depends on FPGA_DFL && HWMON
+ depends on FPGA_DFL && HWMON && PERF_EVENTS
help
The FPGA Management Engine (FME) is a feature device implemented
under Device Feature List (DFL) framework. Select this option to
diff --git a/drivers/fpga/Makefile b/drivers/fpga/Makefile
index 4865b74b00a4..d8e21dfc6778 100644
--- a/drivers/fpga/Makefile
+++ b/drivers/fpga/Makefile
@@ -40,6 +40,7 @@ obj-$(CONFIG_FPGA_DFL_FME_REGION) += dfl-fme-region.o
obj-$(CONFIG_FPGA_DFL_AFU) += dfl-afu.o
dfl-fme-objs := dfl-fme-main.o dfl-fme-pr.o dfl-fme-error.o
+dfl-fme-objs += dfl-fme-perf.o
dfl-afu-objs := dfl-afu-main.o dfl-afu-region.o dfl-afu-dma-region.o
dfl-afu-objs += dfl-afu-error.o
diff --git a/drivers/fpga/dfl-fme-main.c b/drivers/fpga/dfl-fme-main.c
index 1d4690c99268..353305629a0e 100644
--- a/drivers/fpga/dfl-fme-main.c
+++ b/drivers/fpga/dfl-fme-main.c
@@ -580,6 +580,10 @@ static struct dfl_feature_driver fme_feature_drvs[] = {
.ops = &fme_power_mgmt_ops,
},
{
+ .id_table = fme_perf_id_table,
+ .ops = &fme_perf_ops,
+ },
+ {
.ops = NULL,
},
};
diff --git a/drivers/fpga/dfl-fme-perf.c b/drivers/fpga/dfl-fme-perf.c
new file mode 100644
index 000000000000..6ce1ed222ea4
--- /dev/null
+++ b/drivers/fpga/dfl-fme-perf.c
@@ -0,0 +1,1020 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for FPGA Management Engine (FME) Global Performance Reporting
+ *
+ * Copyright 2019 Intel Corporation, Inc.
+ *
+ * Authors:
+ * Kang Luwei <luwei.kang@intel.com>
+ * Xiao Guangrong <guangrong.xiao@linux.intel.com>
+ * Wu Hao <hao.wu@intel.com>
+ * Xu Yilun <yilun.xu@intel.com>
+ * Joseph Grecco <joe.grecco@intel.com>
+ * Enno Luebbers <enno.luebbers@intel.com>
+ * Tim Whisonant <tim.whisonant@intel.com>
+ * Ananda Ravuri <ananda.ravuri@intel.com>
+ * Mitchel, Henry <henry.mitchel@intel.com>
+ */
+
+#include <linux/perf_event.h>
+#include "dfl.h"
+#include "dfl-fme.h"
+
+/*
+ * Performance Counter Registers for Cache.
+ *
+ * Cache Events are listed below as CACHE_EVNT_*.
+ */
+#define CACHE_CTRL 0x8
+#define CACHE_RESET_CNTR BIT_ULL(0)
+#define CACHE_FREEZE_CNTR BIT_ULL(8)
+#define CACHE_CTRL_EVNT GENMASK_ULL(19, 16)
+#define CACHE_EVNT_RD_HIT 0x0
+#define CACHE_EVNT_WR_HIT 0x1
+#define CACHE_EVNT_RD_MISS 0x2
+#define CACHE_EVNT_WR_MISS 0x3
+#define CACHE_EVNT_RSVD 0x4
+#define CACHE_EVNT_HOLD_REQ 0x5
+#define CACHE_EVNT_DATA_WR_PORT_CONTEN 0x6
+#define CACHE_EVNT_TAG_WR_PORT_CONTEN 0x7
+#define CACHE_EVNT_TX_REQ_STALL 0x8
+#define CACHE_EVNT_RX_REQ_STALL 0x9
+#define CACHE_EVNT_EVICTIONS 0xa
+#define CACHE_EVNT_MAX CACHE_EVNT_EVICTIONS
+#define CACHE_CHANNEL_SEL BIT_ULL(20)
+#define CACHE_CHANNEL_RD 0
+#define CACHE_CHANNEL_WR 1
+#define CACHE_CNTR0 0x10
+#define CACHE_CNTR1 0x18
+#define CACHE_CNTR_EVNT_CNTR GENMASK_ULL(47, 0)
+#define CACHE_CNTR_EVNT GENMASK_ULL(63, 60)
+
+/*
+ * Performance Counter Registers for Fabric.
+ *
+ * Fabric Events are listed below as FAB_EVNT_*
+ */
+#define FAB_CTRL 0x20
+#define FAB_RESET_CNTR BIT_ULL(0)
+#define FAB_FREEZE_CNTR BIT_ULL(8)
+#define FAB_CTRL_EVNT GENMASK_ULL(19, 16)
+#define FAB_EVNT_PCIE0_RD 0x0
+#define FAB_EVNT_PCIE0_WR 0x1
+#define FAB_EVNT_PCIE1_RD 0x2
+#define FAB_EVNT_PCIE1_WR 0x3
+#define FAB_EVNT_UPI_RD 0x4
+#define FAB_EVNT_UPI_WR 0x5
+#define FAB_EVNT_MMIO_RD 0x6
+#define FAB_EVNT_MMIO_WR 0x7
+#define FAB_EVNT_MAX FAB_EVNT_MMIO_WR
+#define FAB_PORT_ID GENMASK_ULL(21, 20)
+#define FAB_PORT_FILTER BIT_ULL(23)
+#define FAB_PORT_FILTER_DISABLE 0
+#define FAB_PORT_FILTER_ENABLE 1
+#define FAB_CNTR 0x28
+#define FAB_CNTR_EVNT_CNTR GENMASK_ULL(59, 0)
+#define FAB_CNTR_EVNT GENMASK_ULL(63, 60)
+
+/*
+ * Performance Counter Registers for Clock.
+ *
+ * Clock Counter can't be reset or frozen by SW.
+ */
+#define CLK_CNTR 0x30
+#define BASIC_EVNT_CLK 0x0
+#define BASIC_EVNT_MAX BASIC_EVNT_CLK
+
+/*
+ * Performance Counter Registers for IOMMU / VT-D.
+ *
+ * VT-D Events are listed below as VTD_EVNT_* and VTD_SIP_EVNT_*
+ */
+#define VTD_CTRL 0x38
+#define VTD_RESET_CNTR BIT_ULL(0)
+#define VTD_FREEZE_CNTR BIT_ULL(8)
+#define VTD_CTRL_EVNT GENMASK_ULL(19, 16)
+#define VTD_EVNT_AFU_MEM_RD_TRANS 0x0
+#define VTD_EVNT_AFU_MEM_WR_TRANS 0x1
+#define VTD_EVNT_AFU_DEVTLB_RD_HIT 0x2
+#define VTD_EVNT_AFU_DEVTLB_WR_HIT 0x3
+#define VTD_EVNT_DEVTLB_4K_FILL 0x4
+#define VTD_EVNT_DEVTLB_2M_FILL 0x5
+#define VTD_EVNT_DEVTLB_1G_FILL 0x6
+#define VTD_EVNT_MAX VTD_EVNT_DEVTLB_1G_FILL
+#define VTD_CNTR 0x40
+#define VTD_CNTR_EVNT_CNTR GENMASK_ULL(47, 0)
+#define VTD_CNTR_EVNT GENMASK_ULL(63, 60)
+
+#define VTD_SIP_CTRL 0x48
+#define VTD_SIP_RESET_CNTR BIT_ULL(0)
+#define VTD_SIP_FREEZE_CNTR BIT_ULL(8)
+#define VTD_SIP_CTRL_EVNT GENMASK_ULL(19, 16)
+#define VTD_SIP_EVNT_IOTLB_4K_HIT 0x0
+#define VTD_SIP_EVNT_IOTLB_2M_HIT 0x1
+#define VTD_SIP_EVNT_IOTLB_1G_HIT 0x2
+#define VTD_SIP_EVNT_SLPWC_L3_HIT 0x3
+#define VTD_SIP_EVNT_SLPWC_L4_HIT 0x4
+#define VTD_SIP_EVNT_RCC_HIT 0x5
+#define VTD_SIP_EVNT_IOTLB_4K_MISS 0x6
+#define VTD_SIP_EVNT_IOTLB_2M_MISS 0x7
+#define VTD_SIP_EVNT_IOTLB_1G_MISS 0x8
+#define VTD_SIP_EVNT_SLPWC_L3_MISS 0x9
+#define VTD_SIP_EVNT_SLPWC_L4_MISS 0xa
+#define VTD_SIP_EVNT_RCC_MISS 0xb
+#define VTD_SIP_EVNT_MAX VTD_SIP_EVNT_SLPWC_L4_MISS
+#define VTD_SIP_CNTR 0x50
+#define VTD_SIP_CNTR_EVNT_CNTR GENMASK_ULL(47, 0)
+#define VTD_SIP_CNTR_EVNT GENMASK_ULL(63, 60)
+
+#define PERF_TIMEOUT 30
+
+#define PERF_MAX_PORT_NUM 1U
+
+/**
+ * struct fme_perf_priv - priv data structure for fme perf driver
+ *
+ * @dev: parent device.
+ * @ioaddr: mapped base address of mmio region.
+ * @pmu: pmu data structure for fme perf counters.
+ * @id: id of this fme performance report private feature.
+ * @fab_users: current user number on fabric counters.
+ * @fab_port_id: used to indicate current working mode of fabric counters.
+ * @fab_lock: lock to protect fabric counters working mode.
+ * @cpu: active CPU to which the PMU is bound for accesses.
+ * @node: node for CPU hotplug notifier link.
+ * @cpuhp_state: state for CPU hotplug notification.
+ */
+struct fme_perf_priv {
+ struct device *dev;
+ void __iomem *ioaddr;
+ struct pmu pmu;
+ u64 id;
+
+ u32 fab_users;
+ u32 fab_port_id;
+ spinlock_t fab_lock;
+
+ unsigned int cpu;
+ struct hlist_node node;
+ enum cpuhp_state cpuhp_state;
+};
+
+/**
+ * struct fme_perf_event_ops - callbacks for fme perf events
+ *
+ * @event_init: callback invoked during event init.
+ * @event_destroy: callback invoked during event destroy.
+ * @read_counter: callback to read hardware counters.
+ */
+struct fme_perf_event_ops {
+ int (*event_init)(struct fme_perf_priv *priv, u32 event, u32 portid);
+ void (*event_destroy)(struct fme_perf_priv *priv, u32 event,
+ u32 portid);
+ u64 (*read_counter)(struct fme_perf_priv *priv, u32 event, u32 portid);
+};
+
+#define to_fme_perf_priv(_pmu) container_of(_pmu, struct fme_perf_priv, pmu)
+
+static ssize_t cpumask_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct pmu *pmu = dev_get_drvdata(dev);
+ struct fme_perf_priv *priv;
+
+ priv = to_fme_perf_priv(pmu);
+
+ return cpumap_print_to_pagebuf(true, buf, cpumask_of(priv->cpu));
+}
+static DEVICE_ATTR_RO(cpumask);
+
+static struct attribute *fme_perf_cpumask_attrs[] = {
+ &dev_attr_cpumask.attr,
+ NULL,
+};
+
+static struct attribute_group fme_perf_cpumask_group = {
+ .attrs = fme_perf_cpumask_attrs,
+};
+
+#define FME_EVENT_MASK GENMASK_ULL(11, 0)
+#define FME_EVENT_SHIFT 0
+#define FME_EVTYPE_MASK GENMASK_ULL(15, 12)
+#define FME_EVTYPE_SHIFT 12
+#define FME_EVTYPE_BASIC 0
+#define FME_EVTYPE_CACHE 1
+#define FME_EVTYPE_FABRIC 2
+#define FME_EVTYPE_VTD 3
+#define FME_EVTYPE_VTD_SIP 4
+#define FME_EVTYPE_MAX FME_EVTYPE_VTD_SIP
+#define FME_PORTID_MASK GENMASK_ULL(23, 16)
+#define FME_PORTID_SHIFT 16
+#define FME_PORTID_ROOT (0xffU)
+
+#define get_event(_config) FIELD_GET(FME_EVENT_MASK, _config)
+#define get_evtype(_config) FIELD_GET(FME_EVTYPE_MASK, _config)
+#define get_portid(_config) FIELD_GET(FME_PORTID_MASK, _config)
+
+PMU_FORMAT_ATTR(event, "config:0-11");
+PMU_FORMAT_ATTR(evtype, "config:12-15");
+PMU_FORMAT_ATTR(portid, "config:16-23");
+
+static struct attribute *fme_perf_format_attrs[] = {
+ &format_attr_event.attr,
+ &format_attr_evtype.attr,
+ &format_attr_portid.attr,
+ NULL,
+};
+
+static struct attribute_group fme_perf_format_group = {
+ .name = "format",
+ .attrs = fme_perf_format_attrs,
+};
+
+/*
+ * There are no default events, but we need to create the
+ * "events" group (with empty attrs) before updating it
+ * with the detected events (using pmu->attr_update).
+ */
+static struct attribute *fme_perf_events_attrs_empty[] = {
+ NULL,
+};
+
+static struct attribute_group fme_perf_events_group = {
+ .name = "events",
+ .attrs = fme_perf_events_attrs_empty,
+};
+
+static const struct attribute_group *fme_perf_groups[] = {
+ &fme_perf_format_group,
+ &fme_perf_cpumask_group,
+ &fme_perf_events_group,
+ NULL,
+};
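+
+/*
+ * Usage sketch (illustrative; the instance name dfl_fme0 assumes pdev->id
+ * is 0): with the format attributes above, a raw fabric mmio read count
+ * over the whole FPGA can be requested as
+ *   perf stat -a -e dfl_fme0/event=0x06,evtype=0x02,portid=0xff/ -- sleep 1
+ * and, once the "events" group is populated via attr_update, equivalently as
+ *   perf stat -a -e dfl_fme0/fab_mmio_read/ -- sleep 1
+ */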
+
+static bool is_portid_root(u32 portid)
+{
+ return portid == FME_PORTID_ROOT;
+}
+
+static bool is_portid_port(u32 portid)
+{
+ return portid < PERF_MAX_PORT_NUM;
+}
+
+static bool is_portid_root_or_port(u32 portid)
+{
+ return is_portid_root(portid) || is_portid_port(portid);
+}
+
+static u64 fme_read_perf_cntr_reg(void __iomem *addr)
+{
+ u32 low;
+ u64 v;
+
+ /*
+ * For 64-bit counter registers, the counter may increase and carry
+ * out of bit [31] between the two 32-bit halves of a readq on
+ * platforms without a native 64-bit read (readq is split into two
+ * readl calls). Re-read the low word and retry until it is
+ * consistent with the 64-bit value.
+ */
+ do {
+ v = readq(addr);
+ low = readl(addr);
+ } while (((u32)v) > low);
+
+ return v;
+}
+
+static int basic_event_init(struct fme_perf_priv *priv, u32 event, u32 portid)
+{
+ if (event <= BASIC_EVNT_MAX && is_portid_root(portid))
+ return 0;
+
+ return -EINVAL;
+}
+
+static u64 basic_read_event_counter(struct fme_perf_priv *priv,
+ u32 event, u32 portid)
+{
+ void __iomem *base = priv->ioaddr;
+
+ return fme_read_perf_cntr_reg(base + CLK_CNTR);
+}
+
+static int cache_event_init(struct fme_perf_priv *priv, u32 event, u32 portid)
+{
+ if (priv->id == FME_FEATURE_ID_GLOBAL_IPERF &&
+ event <= CACHE_EVNT_MAX && is_portid_root(portid))
+ return 0;
+
+ return -EINVAL;
+}
+
+static u64 cache_read_event_counter(struct fme_perf_priv *priv,
+ u32 event, u32 portid)
+{
+ void __iomem *base = priv->ioaddr;
+ u64 v, count;
+ u8 channel;
+
+ if (event == CACHE_EVNT_WR_HIT || event == CACHE_EVNT_WR_MISS ||
+ event == CACHE_EVNT_DATA_WR_PORT_CONTEN ||
+ event == CACHE_EVNT_TAG_WR_PORT_CONTEN)
+ channel = CACHE_CHANNEL_WR;
+ else
+ channel = CACHE_CHANNEL_RD;
+
+ /* set channel access type and cache event code. */
+ v = readq(base + CACHE_CTRL);
+ v &= ~(CACHE_CHANNEL_SEL | CACHE_CTRL_EVNT);
+ v |= FIELD_PREP(CACHE_CHANNEL_SEL, channel);
+ v |= FIELD_PREP(CACHE_CTRL_EVNT, event);
+ writeq(v, base + CACHE_CTRL);
+
+ if (readq_poll_timeout_atomic(base + CACHE_CNTR0, v,
+ FIELD_GET(CACHE_CNTR_EVNT, v) == event,
+ 1, PERF_TIMEOUT)) {
+ dev_err(priv->dev, "timeout, unmatched cache event code in counter register.\n");
+ return 0;
+ }
+
+ v = fme_read_perf_cntr_reg(base + CACHE_CNTR0);
+ count = FIELD_GET(CACHE_CNTR_EVNT_CNTR, v);
+ v = fme_read_perf_cntr_reg(base + CACHE_CNTR1);
+ count += FIELD_GET(CACHE_CNTR_EVNT_CNTR, v);
+
+ return count;
+}
+
+static bool is_fabric_event_supported(struct fme_perf_priv *priv, u32 event,
+ u32 portid)
+{
+ if (event > FAB_EVNT_MAX || !is_portid_root_or_port(portid))
+ return false;
+
+ if (priv->id == FME_FEATURE_ID_GLOBAL_DPERF &&
+ (event == FAB_EVNT_PCIE1_RD || event == FAB_EVNT_UPI_RD ||
+ event == FAB_EVNT_PCIE1_WR || event == FAB_EVNT_UPI_WR))
+ return false;
+
+ return true;
+}
+
+static int fabric_event_init(struct fme_perf_priv *priv, u32 event, u32 portid)
+{
+ void __iomem *base = priv->ioaddr;
+ int ret = 0;
+ u64 v;
+
+ if (!is_fabric_event_supported(priv, event, portid))
+ return -EINVAL;
+
+ /*
+ * The fabric counter set can only be in either overall or port mode.
+ * In overall mode it counts overall data for the FPGA; in port mode
+ * it is configured to monitor one individual port.
+ *
+ * So every time a new event is initialized, the driver checks the
+ * current working mode and whether someone else is using this
+ * counter set.
+ */
+ spin_lock(&priv->fab_lock);
+ if (priv->fab_users && priv->fab_port_id != portid) {
+ dev_dbg(priv->dev, "conflict fabric event monitoring mode.\n");
+ ret = -EOPNOTSUPP;
+ goto exit;
+ }
+
+ priv->fab_users++;
+
+ /*
+ * Skip if the current working mode already matches; otherwise switch
+ * the working mode per the input port_id, to monitor overall data or
+ * another port.
+ */
+ if (priv->fab_port_id == portid)
+ goto exit;
+
+ priv->fab_port_id = portid;
+
+ v = readq(base + FAB_CTRL);
+ v &= ~(FAB_PORT_FILTER | FAB_PORT_ID);
+
+ if (is_portid_root(portid)) {
+ v |= FIELD_PREP(FAB_PORT_FILTER, FAB_PORT_FILTER_DISABLE);
+ } else {
+ v |= FIELD_PREP(FAB_PORT_FILTER, FAB_PORT_FILTER_ENABLE);
+ v |= FIELD_PREP(FAB_PORT_ID, portid);
+ }
+ writeq(v, base + FAB_CTRL);
+
+exit:
+ spin_unlock(&priv->fab_lock);
+ return ret;
+}
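+
+/*
+ * Illustrative consequence of the mode check above: counting
+ * dfl_fme0/fab_mmio_read/ (portid 0xff, overall mode) together with
+ * dfl_fme0/fab_port_mmio_read,portid=0x00/ is rejected; whichever event
+ * is initialized second gets -EOPNOTSUPP. The event names are the fabric
+ * events defined further down in this file.
+ */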
+
+static void fabric_event_destroy(struct fme_perf_priv *priv, u32 event,
+ u32 portid)
+{
+ spin_lock(&priv->fab_lock);
+ priv->fab_users--;
+ spin_unlock(&priv->fab_lock);
+}
+
+static u64 fabric_read_event_counter(struct fme_perf_priv *priv, u32 event,
+ u32 portid)
+{
+ void __iomem *base = priv->ioaddr;
+ u64 v;
+
+ v = readq(base + FAB_CTRL);
+ v &= ~FAB_CTRL_EVNT;
+ v |= FIELD_PREP(FAB_CTRL_EVNT, event);
+ writeq(v, base + FAB_CTRL);
+
+ if (readq_poll_timeout_atomic(base + FAB_CNTR, v,
+ FIELD_GET(FAB_CNTR_EVNT, v) == event,
+ 1, PERF_TIMEOUT)) {
+ dev_err(priv->dev, "timeout, unmatched fab event code in counter register.\n");
+ return 0;
+ }
+
+ v = fme_read_perf_cntr_reg(base + FAB_CNTR);
+ return FIELD_GET(FAB_CNTR_EVNT_CNTR, v);
+}
+
+static int vtd_event_init(struct fme_perf_priv *priv, u32 event, u32 portid)
+{
+ if (priv->id == FME_FEATURE_ID_GLOBAL_IPERF &&
+ event <= VTD_EVNT_MAX && is_portid_port(portid))
+ return 0;
+
+ return -EINVAL;
+}
+
+static u64 vtd_read_event_counter(struct fme_perf_priv *priv, u32 event,
+ u32 portid)
+{
+ void __iomem *base = priv->ioaddr;
+ u64 v;
+
+ event += (portid * (VTD_EVNT_MAX + 1));
+
+ v = readq(base + VTD_CTRL);
+ v &= ~VTD_CTRL_EVNT;
+ v |= FIELD_PREP(VTD_CTRL_EVNT, event);
+ writeq(v, base + VTD_CTRL);
+
+ if (readq_poll_timeout_atomic(base + VTD_CNTR, v,
+ FIELD_GET(VTD_CNTR_EVNT, v) == event,
+ 1, PERF_TIMEOUT)) {
+ dev_err(priv->dev, "timeout, unmatched vtd event code in counter register.\n");
+ return 0;
+ }
+
+ v = fme_read_perf_cntr_reg(base + VTD_CNTR);
+ return FIELD_GET(VTD_CNTR_EVNT_CNTR, v);
+}
+
+static int vtd_sip_event_init(struct fme_perf_priv *priv, u32 event, u32 portid)
+{
+ if (priv->id == FME_FEATURE_ID_GLOBAL_IPERF &&
+ event <= VTD_SIP_EVNT_MAX && is_portid_root(portid))
+ return 0;
+
+ return -EINVAL;
+}
+
+static u64 vtd_sip_read_event_counter(struct fme_perf_priv *priv, u32 event,
+ u32 portid)
+{
+ void __iomem *base = priv->ioaddr;
+ u64 v;
+
+ v = readq(base + VTD_SIP_CTRL);
+ v &= ~VTD_SIP_CTRL_EVNT;
+ v |= FIELD_PREP(VTD_SIP_CTRL_EVNT, event);
+ writeq(v, base + VTD_SIP_CTRL);
+
+ if (readq_poll_timeout_atomic(base + VTD_SIP_CNTR, v,
+ FIELD_GET(VTD_SIP_CNTR_EVNT, v) == event,
+ 1, PERF_TIMEOUT)) {
+ dev_err(priv->dev, "timeout, unmatched vtd sip event code in counter register\n");
+ return 0;
+ }
+
+ v = fme_read_perf_cntr_reg(base + VTD_SIP_CNTR);
+ return FIELD_GET(VTD_SIP_CNTR_EVNT_CNTR, v);
+}
+
+static struct fme_perf_event_ops fme_perf_event_ops[] = {
+ [FME_EVTYPE_BASIC] = {.event_init = basic_event_init,
+ .read_counter = basic_read_event_counter,},
+ [FME_EVTYPE_CACHE] = {.event_init = cache_event_init,
+ .read_counter = cache_read_event_counter,},
+ [FME_EVTYPE_FABRIC] = {.event_init = fabric_event_init,
+ .event_destroy = fabric_event_destroy,
+ .read_counter = fabric_read_event_counter,},
+ [FME_EVTYPE_VTD] = {.event_init = vtd_event_init,
+ .read_counter = vtd_read_event_counter,},
+ [FME_EVTYPE_VTD_SIP] = {.event_init = vtd_sip_event_init,
+ .read_counter = vtd_sip_read_event_counter,},
+};
+
+static ssize_t fme_perf_event_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dev_ext_attribute *eattr;
+ unsigned long config;
+ char *ptr = buf;
+
+ eattr = container_of(attr, struct dev_ext_attribute, attr);
+ config = (unsigned long)eattr->var;
+
+ ptr += sprintf(ptr, "event=0x%02x", (unsigned int)get_event(config));
+ ptr += sprintf(ptr, ",evtype=0x%02x", (unsigned int)get_evtype(config));
+
+ if (is_portid_root(get_portid(config)))
+ ptr += sprintf(ptr, ",portid=0x%02x\n", FME_PORTID_ROOT);
+ else
+ ptr += sprintf(ptr, ",portid=?\n");
+
+ return (ssize_t)(ptr - buf);
+}
+
+#define FME_EVENT_ATTR(_name) \
+ __ATTR(_name, 0444, fme_perf_event_show, NULL)
+
+#define FME_PORT_EVENT_CONFIG(_event, _type) \
+ (void *)((((_event) << FME_EVENT_SHIFT) & FME_EVENT_MASK) | \
+ (((_type) << FME_EVTYPE_SHIFT) & FME_EVTYPE_MASK))
+
+#define FME_EVENT_CONFIG(_event, _type) \
+ (void *)((((_event) << FME_EVENT_SHIFT) & FME_EVENT_MASK) | \
+ (((_type) << FME_EVTYPE_SHIFT) & FME_EVTYPE_MASK) | \
+ (FME_PORTID_ROOT << FME_PORTID_SHIFT))
+
+/* FME Perf Basic Events */
+#define FME_EVENT_BASIC(_name, _event) \
+static struct dev_ext_attribute fme_perf_event_##_name = { \
+ .attr = FME_EVENT_ATTR(_name), \
+ .var = FME_EVENT_CONFIG(_event, FME_EVTYPE_BASIC), \
+}
+
+FME_EVENT_BASIC(clock, BASIC_EVNT_CLK);
+
+static struct attribute *fme_perf_basic_events_attrs[] = {
+ &fme_perf_event_clock.attr.attr,
+ NULL,
+};
+
+static const struct attribute_group fme_perf_basic_events_group = {
+ .name = "events",
+ .attrs = fme_perf_basic_events_attrs,
+};
+
+/* FME Perf Cache Events */
+#define FME_EVENT_CACHE(_name, _event) \
+static struct dev_ext_attribute fme_perf_event_cache_##_name = { \
+ .attr = FME_EVENT_ATTR(cache_##_name), \
+ .var = FME_EVENT_CONFIG(_event, FME_EVTYPE_CACHE), \
+}
+
+FME_EVENT_CACHE(read_hit, CACHE_EVNT_RD_HIT);
+FME_EVENT_CACHE(read_miss, CACHE_EVNT_RD_MISS);
+FME_EVENT_CACHE(write_hit, CACHE_EVNT_WR_HIT);
+FME_EVENT_CACHE(write_miss, CACHE_EVNT_WR_MISS);
+FME_EVENT_CACHE(hold_request, CACHE_EVNT_HOLD_REQ);
+FME_EVENT_CACHE(tx_req_stall, CACHE_EVNT_TX_REQ_STALL);
+FME_EVENT_CACHE(rx_req_stall, CACHE_EVNT_RX_REQ_STALL);
+FME_EVENT_CACHE(eviction, CACHE_EVNT_EVICTIONS);
+FME_EVENT_CACHE(data_write_port_contention, CACHE_EVNT_DATA_WR_PORT_CONTEN);
+FME_EVENT_CACHE(tag_write_port_contention, CACHE_EVNT_TAG_WR_PORT_CONTEN);
+
+static struct attribute *fme_perf_cache_events_attrs[] = {
+ &fme_perf_event_cache_read_hit.attr.attr,
+ &fme_perf_event_cache_read_miss.attr.attr,
+ &fme_perf_event_cache_write_hit.attr.attr,
+ &fme_perf_event_cache_write_miss.attr.attr,
+ &fme_perf_event_cache_hold_request.attr.attr,
+ &fme_perf_event_cache_tx_req_stall.attr.attr,
+ &fme_perf_event_cache_rx_req_stall.attr.attr,
+ &fme_perf_event_cache_eviction.attr.attr,
+ &fme_perf_event_cache_data_write_port_contention.attr.attr,
+ &fme_perf_event_cache_tag_write_port_contention.attr.attr,
+ NULL,
+};
+
+static umode_t fme_perf_events_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ struct pmu *pmu = dev_get_drvdata(kobj_to_dev(kobj));
+ struct fme_perf_priv *priv = to_fme_perf_priv(pmu);
+
+ return (priv->id == FME_FEATURE_ID_GLOBAL_IPERF) ? attr->mode : 0;
+}
+
+static const struct attribute_group fme_perf_cache_events_group = {
+ .name = "events",
+ .attrs = fme_perf_cache_events_attrs,
+ .is_visible = fme_perf_events_visible,
+};
+
+/* FME Perf Fabric Events */
+#define FME_EVENT_FABRIC(_name, _event) \
+static struct dev_ext_attribute fme_perf_event_fab_##_name = { \
+ .attr = FME_EVENT_ATTR(fab_##_name), \
+ .var = FME_EVENT_CONFIG(_event, FME_EVTYPE_FABRIC), \
+}
+
+#define FME_EVENT_FABRIC_PORT(_name, _event) \
+static struct dev_ext_attribute fme_perf_event_fab_port_##_name = { \
+ .attr = FME_EVENT_ATTR(fab_port_##_name), \
+ .var = FME_PORT_EVENT_CONFIG(_event, FME_EVTYPE_FABRIC), \
+}
+
+FME_EVENT_FABRIC(pcie0_read, FAB_EVNT_PCIE0_RD);
+FME_EVENT_FABRIC(pcie0_write, FAB_EVNT_PCIE0_WR);
+FME_EVENT_FABRIC(pcie1_read, FAB_EVNT_PCIE1_RD);
+FME_EVENT_FABRIC(pcie1_write, FAB_EVNT_PCIE1_WR);
+FME_EVENT_FABRIC(upi_read, FAB_EVNT_UPI_RD);
+FME_EVENT_FABRIC(upi_write, FAB_EVNT_UPI_WR);
+FME_EVENT_FABRIC(mmio_read, FAB_EVNT_MMIO_RD);
+FME_EVENT_FABRIC(mmio_write, FAB_EVNT_MMIO_WR);
+
+FME_EVENT_FABRIC_PORT(pcie0_read, FAB_EVNT_PCIE0_RD);
+FME_EVENT_FABRIC_PORT(pcie0_write, FAB_EVNT_PCIE0_WR);
+FME_EVENT_FABRIC_PORT(pcie1_read, FAB_EVNT_PCIE1_RD);
+FME_EVENT_FABRIC_PORT(pcie1_write, FAB_EVNT_PCIE1_WR);
+FME_EVENT_FABRIC_PORT(upi_read, FAB_EVNT_UPI_RD);
+FME_EVENT_FABRIC_PORT(upi_write, FAB_EVNT_UPI_WR);
+FME_EVENT_FABRIC_PORT(mmio_read, FAB_EVNT_MMIO_RD);
+FME_EVENT_FABRIC_PORT(mmio_write, FAB_EVNT_MMIO_WR);
+
+static struct attribute *fme_perf_fabric_events_attrs[] = {
+ &fme_perf_event_fab_pcie0_read.attr.attr,
+ &fme_perf_event_fab_pcie0_write.attr.attr,
+ &fme_perf_event_fab_pcie1_read.attr.attr,
+ &fme_perf_event_fab_pcie1_write.attr.attr,
+ &fme_perf_event_fab_upi_read.attr.attr,
+ &fme_perf_event_fab_upi_write.attr.attr,
+ &fme_perf_event_fab_mmio_read.attr.attr,
+ &fme_perf_event_fab_mmio_write.attr.attr,
+ &fme_perf_event_fab_port_pcie0_read.attr.attr,
+ &fme_perf_event_fab_port_pcie0_write.attr.attr,
+ &fme_perf_event_fab_port_pcie1_read.attr.attr,
+ &fme_perf_event_fab_port_pcie1_write.attr.attr,
+ &fme_perf_event_fab_port_upi_read.attr.attr,
+ &fme_perf_event_fab_port_upi_write.attr.attr,
+ &fme_perf_event_fab_port_mmio_read.attr.attr,
+ &fme_perf_event_fab_port_mmio_write.attr.attr,
+ NULL,
+};
+
+static umode_t fme_perf_fabric_events_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ struct pmu *pmu = dev_get_drvdata(kobj_to_dev(kobj));
+ struct fme_perf_priv *priv = to_fme_perf_priv(pmu);
+ struct dev_ext_attribute *eattr;
+ unsigned long var;
+
+ eattr = container_of(attr, struct dev_ext_attribute, attr.attr);
+ var = (unsigned long)eattr->var;
+
+ if (is_fabric_event_supported(priv, get_event(var), get_portid(var)))
+ return attr->mode;
+
+ return 0;
+}
+
+static const struct attribute_group fme_perf_fabric_events_group = {
+ .name = "events",
+ .attrs = fme_perf_fabric_events_attrs,
+ .is_visible = fme_perf_fabric_events_visible,
+};
+
+/* FME Perf VTD Events */
+#define FME_EVENT_VTD_PORT(_name, _event) \
+static struct dev_ext_attribute fme_perf_event_vtd_port_##_name = { \
+ .attr = FME_EVENT_ATTR(vtd_port_##_name), \
+ .var = FME_PORT_EVENT_CONFIG(_event, FME_EVTYPE_VTD), \
+}
+
+FME_EVENT_VTD_PORT(read_transaction, VTD_EVNT_AFU_MEM_RD_TRANS);
+FME_EVENT_VTD_PORT(write_transaction, VTD_EVNT_AFU_MEM_WR_TRANS);
+FME_EVENT_VTD_PORT(devtlb_read_hit, VTD_EVNT_AFU_DEVTLB_RD_HIT);
+FME_EVENT_VTD_PORT(devtlb_write_hit, VTD_EVNT_AFU_DEVTLB_WR_HIT);
+FME_EVENT_VTD_PORT(devtlb_4k_fill, VTD_EVNT_DEVTLB_4K_FILL);
+FME_EVENT_VTD_PORT(devtlb_2m_fill, VTD_EVNT_DEVTLB_2M_FILL);
+FME_EVENT_VTD_PORT(devtlb_1g_fill, VTD_EVNT_DEVTLB_1G_FILL);
+
+static struct attribute *fme_perf_vtd_events_attrs[] = {
+ &fme_perf_event_vtd_port_read_transaction.attr.attr,
+ &fme_perf_event_vtd_port_write_transaction.attr.attr,
+ &fme_perf_event_vtd_port_devtlb_read_hit.attr.attr,
+ &fme_perf_event_vtd_port_devtlb_write_hit.attr.attr,
+ &fme_perf_event_vtd_port_devtlb_4k_fill.attr.attr,
+ &fme_perf_event_vtd_port_devtlb_2m_fill.attr.attr,
+ &fme_perf_event_vtd_port_devtlb_1g_fill.attr.attr,
+ NULL,
+};
+
+static const struct attribute_group fme_perf_vtd_events_group = {
+ .name = "events",
+ .attrs = fme_perf_vtd_events_attrs,
+ .is_visible = fme_perf_events_visible,
+};
+
+/* FME Perf VTD SIP Events */
+#define FME_EVENT_VTD_SIP(_name, _event) \
+static struct dev_ext_attribute fme_perf_event_vtd_sip_##_name = { \
+ .attr = FME_EVENT_ATTR(vtd_sip_##_name), \
+ .var = FME_EVENT_CONFIG(_event, FME_EVTYPE_VTD_SIP), \
+}
+
+FME_EVENT_VTD_SIP(iotlb_4k_hit, VTD_SIP_EVNT_IOTLB_4K_HIT);
+FME_EVENT_VTD_SIP(iotlb_2m_hit, VTD_SIP_EVNT_IOTLB_2M_HIT);
+FME_EVENT_VTD_SIP(iotlb_1g_hit, VTD_SIP_EVNT_IOTLB_1G_HIT);
+FME_EVENT_VTD_SIP(slpwc_l3_hit, VTD_SIP_EVNT_SLPWC_L3_HIT);
+FME_EVENT_VTD_SIP(slpwc_l4_hit, VTD_SIP_EVNT_SLPWC_L4_HIT);
+FME_EVENT_VTD_SIP(rcc_hit, VTD_SIP_EVNT_RCC_HIT);
+FME_EVENT_VTD_SIP(iotlb_4k_miss, VTD_SIP_EVNT_IOTLB_4K_MISS);
+FME_EVENT_VTD_SIP(iotlb_2m_miss, VTD_SIP_EVNT_IOTLB_2M_MISS);
+FME_EVENT_VTD_SIP(iotlb_1g_miss, VTD_SIP_EVNT_IOTLB_1G_MISS);
+FME_EVENT_VTD_SIP(slpwc_l3_miss, VTD_SIP_EVNT_SLPWC_L3_MISS);
+FME_EVENT_VTD_SIP(slpwc_l4_miss, VTD_SIP_EVNT_SLPWC_L4_MISS);
+FME_EVENT_VTD_SIP(rcc_miss, VTD_SIP_EVNT_RCC_MISS);
+
+static struct attribute *fme_perf_vtd_sip_events_attrs[] = {
+ &fme_perf_event_vtd_sip_iotlb_4k_hit.attr.attr,
+ &fme_perf_event_vtd_sip_iotlb_2m_hit.attr.attr,
+ &fme_perf_event_vtd_sip_iotlb_1g_hit.attr.attr,
+ &fme_perf_event_vtd_sip_slpwc_l3_hit.attr.attr,
+ &fme_perf_event_vtd_sip_slpwc_l4_hit.attr.attr,
+ &fme_perf_event_vtd_sip_rcc_hit.attr.attr,
+ &fme_perf_event_vtd_sip_iotlb_4k_miss.attr.attr,
+ &fme_perf_event_vtd_sip_iotlb_2m_miss.attr.attr,
+ &fme_perf_event_vtd_sip_iotlb_1g_miss.attr.attr,
+ &fme_perf_event_vtd_sip_slpwc_l3_miss.attr.attr,
+ &fme_perf_event_vtd_sip_slpwc_l4_miss.attr.attr,
+ &fme_perf_event_vtd_sip_rcc_miss.attr.attr,
+ NULL,
+};
+
+static const struct attribute_group fme_perf_vtd_sip_events_group = {
+ .name = "events",
+ .attrs = fme_perf_vtd_sip_events_attrs,
+ .is_visible = fme_perf_events_visible,
+};
+
+static const struct attribute_group *fme_perf_events_groups[] = {
+ &fme_perf_basic_events_group,
+ &fme_perf_cache_events_group,
+ &fme_perf_fabric_events_group,
+ &fme_perf_vtd_events_group,
+ &fme_perf_vtd_sip_events_group,
+ NULL,
+};
+
+static struct fme_perf_event_ops *get_event_ops(u32 evtype)
+{
+ if (evtype > FME_EVTYPE_MAX)
+ return NULL;
+
+ return &fme_perf_event_ops[evtype];
+}
+
+static void fme_perf_event_destroy(struct perf_event *event)
+{
+ struct fme_perf_event_ops *ops = get_event_ops(event->hw.event_base);
+ struct fme_perf_priv *priv = to_fme_perf_priv(event->pmu);
+
+ if (ops->event_destroy)
+ ops->event_destroy(priv, event->hw.idx, event->hw.config_base);
+}
+
+static int fme_perf_event_init(struct perf_event *event)
+{
+ struct fme_perf_priv *priv = to_fme_perf_priv(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ struct fme_perf_event_ops *ops;
+ u32 eventid, evtype, portid;
+
+ /* check the event attr type for PMU enumeration */
+ if (event->attr.type != event->pmu->type)
+ return -ENOENT;
+
+ /*
+ * FME counters are shared across all cores, so per-process
+ * counting and event sampling modes are not supported.
+ */
+ if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
+ return -EINVAL;
+
+ if (event->cpu < 0)
+ return -EINVAL;
+
+ if (event->cpu != priv->cpu)
+ return -EINVAL;
+
+ eventid = get_event(event->attr.config);
+ portid = get_portid(event->attr.config);
+ evtype = get_evtype(event->attr.config);
+ if (evtype > FME_EVTYPE_MAX)
+ return -EINVAL;
+
+ hwc->event_base = evtype;
+ hwc->idx = (int)eventid;
+ hwc->config_base = portid;
+
+ event->destroy = fme_perf_event_destroy;
+
+ dev_dbg(priv->dev, "%s event=0x%x, evtype=0x%x, portid=0x%x,\n",
+ __func__, eventid, evtype, portid);
+
+ ops = get_event_ops(evtype);
+ if (ops->event_init)
+ return ops->event_init(priv, eventid, portid);
+
+ return 0;
+}
+
+static void fme_perf_event_update(struct perf_event *event)
+{
+ struct fme_perf_event_ops *ops = get_event_ops(event->hw.event_base);
+ struct fme_perf_priv *priv = to_fme_perf_priv(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ u64 now, prev, delta;
+
+ now = ops->read_counter(priv, (u32)hwc->idx, hwc->config_base);
+ prev = local64_read(&hwc->prev_count);
+ delta = now - prev;
+
+ local64_add(delta, &event->count);
+}
+
+static void fme_perf_event_start(struct perf_event *event, int flags)
+{
+ struct fme_perf_event_ops *ops = get_event_ops(event->hw.event_base);
+ struct fme_perf_priv *priv = to_fme_perf_priv(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ u64 count;
+
+ count = ops->read_counter(priv, (u32)hwc->idx, hwc->config_base);
+ local64_set(&hwc->prev_count, count);
+}
+
+static void fme_perf_event_stop(struct perf_event *event, int flags)
+{
+ fme_perf_event_update(event);
+}
+
+static int fme_perf_event_add(struct perf_event *event, int flags)
+{
+ if (flags & PERF_EF_START)
+ fme_perf_event_start(event, flags);
+
+ return 0;
+}
+
+static void fme_perf_event_del(struct perf_event *event, int flags)
+{
+ fme_perf_event_stop(event, PERF_EF_UPDATE);
+}
+
+static void fme_perf_event_read(struct perf_event *event)
+{
+ fme_perf_event_update(event);
+}
+
+static void fme_perf_setup_hardware(struct fme_perf_priv *priv)
+{
+ void __iomem *base = priv->ioaddr;
+ u64 v;
+
+ /* read and save current working mode for fabric counters */
+ v = readq(base + FAB_CTRL);
+
+ if (FIELD_GET(FAB_PORT_FILTER, v) == FAB_PORT_FILTER_DISABLE)
+ priv->fab_port_id = FME_PORTID_ROOT;
+ else
+ priv->fab_port_id = FIELD_GET(FAB_PORT_ID, v);
+}
+
+static int fme_perf_pmu_register(struct platform_device *pdev,
+ struct fme_perf_priv *priv)
+{
+ struct pmu *pmu = &priv->pmu;
+ char *name;
+ int ret;
+
+ spin_lock_init(&priv->fab_lock);
+
+ fme_perf_setup_hardware(priv);
+
+ pmu->task_ctx_nr = perf_invalid_context;
+ pmu->attr_groups = fme_perf_groups;
+ pmu->attr_update = fme_perf_events_groups;
+ pmu->event_init = fme_perf_event_init;
+ pmu->add = fme_perf_event_add;
+ pmu->del = fme_perf_event_del;
+ pmu->start = fme_perf_event_start;
+ pmu->stop = fme_perf_event_stop;
+ pmu->read = fme_perf_event_read;
+ pmu->capabilities = PERF_PMU_CAP_NO_INTERRUPT |
+ PERF_PMU_CAP_NO_EXCLUDE;
+
+ name = devm_kasprintf(priv->dev, GFP_KERNEL, "dfl_fme%d", pdev->id);
+
+ ret = perf_pmu_register(pmu, name, -1);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void fme_perf_pmu_unregister(struct fme_perf_priv *priv)
+{
+ perf_pmu_unregister(&priv->pmu);
+}
+
+static int fme_perf_offline_cpu(unsigned int cpu, struct hlist_node *node)
+{
+ struct fme_perf_priv *priv;
+ int target;
+
+ priv = hlist_entry_safe(node, struct fme_perf_priv, node);
+
+ if (cpu != priv->cpu)
+ return 0;
+
+ target = cpumask_any_but(cpu_online_mask, cpu);
+ if (target >= nr_cpu_ids)
+ return 0;
+
+ priv->cpu = target;
+ return 0;
+}
+
+static int fme_perf_init(struct platform_device *pdev,
+ struct dfl_feature *feature)
+{
+ struct fme_perf_priv *priv;
+ int ret;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->dev = &pdev->dev;
+ priv->ioaddr = feature->ioaddr;
+ priv->id = feature->id;
+ priv->cpu = raw_smp_processor_id();
+
+ ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
+ "perf/fpga/dfl_fme:online",
+ NULL, fme_perf_offline_cpu);
+ if (ret < 0)
+ return ret;
+
+ priv->cpuhp_state = ret;
+
+ /* Register the pmu instance for cpu hotplug */
+ ret = cpuhp_state_add_instance_nocalls(priv->cpuhp_state, &priv->node);
+ if (ret)
+ goto cpuhp_instance_err;
+
+ ret = fme_perf_pmu_register(pdev, priv);
+ if (ret)
+ goto pmu_register_err;
+
+ feature->priv = priv;
+ return 0;
+
+pmu_register_err:
+ cpuhp_state_remove_instance_nocalls(priv->cpuhp_state, &priv->node);
+cpuhp_instance_err:
+ cpuhp_remove_multi_state(priv->cpuhp_state);
+ return ret;
+}
+
+static void fme_perf_uinit(struct platform_device *pdev,
+ struct dfl_feature *feature)
+{
+ struct fme_perf_priv *priv = feature->priv;
+
+ fme_perf_pmu_unregister(priv);
+ cpuhp_state_remove_instance_nocalls(priv->cpuhp_state, &priv->node);
+ cpuhp_remove_multi_state(priv->cpuhp_state);
+}
+
+const struct dfl_feature_id fme_perf_id_table[] = {
+ {.id = FME_FEATURE_ID_GLOBAL_IPERF,},
+ {.id = FME_FEATURE_ID_GLOBAL_DPERF,},
+ {0,}
+};
+
+const struct dfl_feature_ops fme_perf_ops = {
+ .init = fme_perf_init,
+ .uinit = fme_perf_uinit,
+};
diff --git a/drivers/fpga/dfl-fme.h b/drivers/fpga/dfl-fme.h
index 6685c8ef965b..4195dd68193e 100644
--- a/drivers/fpga/dfl-fme.h
+++ b/drivers/fpga/dfl-fme.h
@@ -38,5 +38,7 @@ extern const struct dfl_feature_id fme_pr_mgmt_id_table[];
extern const struct dfl_feature_ops fme_global_err_ops;
extern const struct dfl_feature_id fme_global_err_id_table[];
extern const struct attribute_group fme_global_err_group;
+extern const struct dfl_feature_ops fme_perf_ops;
+extern const struct dfl_feature_id fme_perf_id_table[];
#endif /* __DFL_FME_H */
diff --git a/drivers/fpga/dfl.h b/drivers/fpga/dfl.h
index 9f0e656de720..d312678a9aca 100644
--- a/drivers/fpga/dfl.h
+++ b/drivers/fpga/dfl.h
@@ -197,12 +197,14 @@ struct dfl_feature_driver {
* feature dev (platform device)'s reources.
* @ioaddr: mapped mmio resource address.
* @ops: ops of this sub feature.
+ * @priv: priv data of this feature.
*/
struct dfl_feature {
u64 id;
int resource_index;
void __iomem *ioaddr;
const struct dfl_feature_ops *ops;
+ void *priv;
};
#define DEV_STATUS_IN_USE 0
diff --git a/drivers/fpga/stratix10-soc.c b/drivers/fpga/stratix10-soc.c
index 215d33789c74..bae98c8ce362 100644
--- a/drivers/fpga/stratix10-soc.c
+++ b/drivers/fpga/stratix10-soc.c
@@ -154,11 +154,11 @@ static void s10_receive_callback(struct stratix10_svc_client *client,
* Here we set status bits as we receive them. Elsewhere, we always use
* test_and_clear_bit() to check status in priv->status
*/
- for (i = 0; i <= SVC_STATUS_RECONFIG_ERROR; i++)
+ for (i = 0; i <= SVC_STATUS_ERROR; i++)
if (status & (1 << i))
set_bit(i, &priv->status);
- if (status & BIT(SVC_STATUS_RECONFIG_BUFFER_DONE)) {
+ if (status & BIT(SVC_STATUS_BUFFER_DONE)) {
s10_unlock_bufs(priv, data->kaddr1);
s10_unlock_bufs(priv, data->kaddr2);
s10_unlock_bufs(priv, data->kaddr3);
@@ -209,8 +209,7 @@ static int s10_ops_write_init(struct fpga_manager *mgr,
}
ret = 0;
- if (!test_and_clear_bit(SVC_STATUS_RECONFIG_REQUEST_OK,
- &priv->status)) {
+ if (!test_and_clear_bit(SVC_STATUS_OK, &priv->status)) {
ret = -ETIMEDOUT;
goto init_done;
}
@@ -323,17 +322,15 @@ static int s10_ops_write(struct fpga_manager *mgr, const char *buf,
&priv->status_return_completion,
S10_BUFFER_TIMEOUT);
- if (test_and_clear_bit(SVC_STATUS_RECONFIG_BUFFER_DONE,
- &priv->status) ||
- test_and_clear_bit(SVC_STATUS_RECONFIG_BUFFER_SUBMITTED,
+ if (test_and_clear_bit(SVC_STATUS_BUFFER_DONE, &priv->status) ||
+ test_and_clear_bit(SVC_STATUS_BUFFER_SUBMITTED,
&priv->status)) {
ret = 0;
continue;
}
- if (test_and_clear_bit(SVC_STATUS_RECONFIG_ERROR,
- &priv->status)) {
- dev_err(dev, "ERROR - giving up - SVC_STATUS_RECONFIG_ERROR\n");
+ if (test_and_clear_bit(SVC_STATUS_ERROR, &priv->status)) {
+ dev_err(dev, "ERROR - giving up - SVC_STATUS_ERROR\n");
ret = -EFAULT;
break;
}
@@ -393,13 +390,11 @@ static int s10_ops_write_complete(struct fpga_manager *mgr,
timeout = ret;
ret = 0;
- if (test_and_clear_bit(SVC_STATUS_RECONFIG_COMPLETED,
- &priv->status))
+ if (test_and_clear_bit(SVC_STATUS_COMPLETED, &priv->status))
break;
- if (test_and_clear_bit(SVC_STATUS_RECONFIG_ERROR,
- &priv->status)) {
- dev_err(dev, "ERROR - giving up - SVC_STATUS_RECONFIG_ERROR\n");
+ if (test_and_clear_bit(SVC_STATUS_ERROR, &priv->status)) {
+ dev_err(dev, "ERROR - giving up - SVC_STATUS_ERROR\n");
ret = -EFAULT;
break;
}
diff --git a/drivers/fpga/zynqmp-fpga.c b/drivers/fpga/zynqmp-fpga.c
index b8a88d21d038..4a1139e05280 100644
--- a/drivers/fpga/zynqmp-fpga.c
+++ b/drivers/fpga/zynqmp-fpga.c
@@ -40,16 +40,12 @@ static int zynqmp_fpga_ops_write_init(struct fpga_manager *mgr,
static int zynqmp_fpga_ops_write(struct fpga_manager *mgr,
const char *buf, size_t size)
{
- const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
struct zynqmp_fpga_priv *priv;
dma_addr_t dma_addr;
u32 eemi_flags = 0;
char *kbuf;
int ret;
- if (IS_ERR_OR_NULL(eemi_ops) || !eemi_ops->fpga_load)
- return -ENXIO;
-
priv = mgr->priv;
kbuf = dma_alloc_coherent(priv->dev, size, &dma_addr, GFP_KERNEL);
@@ -63,7 +59,7 @@ static int zynqmp_fpga_ops_write(struct fpga_manager *mgr,
if (priv->flags & FPGA_MGR_PARTIAL_RECONFIG)
eemi_flags |= XILINX_ZYNQMP_PM_FPGA_PARTIAL;
- ret = eemi_ops->fpga_load(dma_addr, size, eemi_flags);
+ ret = zynqmp_pm_fpga_load(dma_addr, size, eemi_flags);
dma_free_coherent(priv->dev, size, kbuf, dma_addr);
@@ -78,13 +74,9 @@ static int zynqmp_fpga_ops_write_complete(struct fpga_manager *mgr,
static enum fpga_mgr_states zynqmp_fpga_ops_state(struct fpga_manager *mgr)
{
- const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
- u32 status;
-
- if (IS_ERR_OR_NULL(eemi_ops) || !eemi_ops->fpga_get_status)
- return FPGA_MGR_STATE_UNKNOWN;
+ u32 status = 0;
- eemi_ops->fpga_get_status(&status);
+ zynqmp_pm_fpga_get_status(&status);
if (status & IXR_FPGA_DONE_MASK)
return FPGA_MGR_STATE_OPERATING;
diff --git a/drivers/greybus/Kconfig b/drivers/greybus/Kconfig
index b84fcaf8b105..aeea082f1418 100644
--- a/drivers/greybus/Kconfig
+++ b/drivers/greybus/Kconfig
@@ -3,7 +3,7 @@ menuconfig GREYBUS
tristate "Greybus support"
depends on SYSFS
---help---
- This option enables the Greybus driver core. Greybus is an
+ This option enables the Greybus driver core. Greybus is a
hardware protocol that was designed to provide Unipro with a
sane application layer. It was originally designed for the
ARA project, a module phone system, but has shown up in other
@@ -12,7 +12,7 @@ menuconfig GREYBUS
Say Y here to enable support for these types of drivers.
- To compile this code as a module, chose M here: the module
+ To compile this code as a module, choose M here: the module
will be called greybus.ko
if GREYBUS
@@ -25,7 +25,7 @@ config GREYBUS_ES2
acts as a Greybus "host controller". This device is a bridge
from a USB device to a Unipro network.
- To compile this code as a module, chose M here: the module
+ To compile this code as a module, choose M here: the module
will be called gb-es2.ko
endif # GREYBUS
diff --git a/drivers/misc/cardreader/rts5261.c b/drivers/misc/cardreader/rts5261.c
index 78c3b1d424c3..4ed86122eaec 100644
--- a/drivers/misc/cardreader/rts5261.c
+++ b/drivers/misc/cardreader/rts5261.c
@@ -639,8 +639,13 @@ int rts5261_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock,
if (initial_mode) {
/* We use 250k(around) here, in initial stage */
- clk_divider = SD_CLK_DIVIDE_128;
- card_clock = 30000000;
+ if (is_version(pcr, PID_5261, IC_VER_D)) {
+ clk_divider = SD_CLK_DIVIDE_256;
+ card_clock = 60000000;
+ } else {
+ clk_divider = SD_CLK_DIVIDE_128;
+ card_clock = 30000000;
+ }
} else {
clk_divider = SD_CLK_DIVIDE_0;
}
diff --git a/drivers/misc/lkdtm/bugs.c b/drivers/misc/lkdtm/bugs.c
index 886459e0ddd9..736675f0a246 100644
--- a/drivers/misc/lkdtm/bugs.c
+++ b/drivers/misc/lkdtm/bugs.c
@@ -208,7 +208,7 @@ void lkdtm_OVERFLOW_UNSIGNED(void)
ignored = value;
}
-/* Intentially using old-style flex array definition of 1 byte. */
+/* Intentionally using old-style flex array definition of 1 byte. */
struct array_bounds_flex_array {
int one;
int two;
diff --git a/drivers/misc/mic/scif/scif_nodeqp.c b/drivers/misc/mic/scif/scif_nodeqp.c
index fcd999f50d14..ea084626fe11 100644
--- a/drivers/misc/mic/scif/scif_nodeqp.c
+++ b/drivers/misc/mic/scif/scif_nodeqp.c
@@ -660,7 +660,7 @@ int scif_nodeqp_send(struct scif_dev *scifdev, struct scifmsg *msg)
struct device *spdev = NULL;
if (msg->uop > SCIF_EXIT_ACK) {
- /* Dont send messages once the exit flow has begun */
+ /* Don't send messages once the exit flow has begun */
if (OP_IDLE != scifdev->exit)
return -ENODEV;
spdev = scif_get_peer_dev(scifdev);
diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
index 79a963105983..d5e097cd556d 100644
--- a/drivers/misc/sgi-xp/xpc_main.c
+++ b/drivers/misc/sgi-xp/xpc_main.c
@@ -59,16 +59,16 @@
/* define two XPC debug device structures to be used with dev_dbg() et al */
-struct device_driver xpc_dbg_name = {
+static struct device_driver xpc_dbg_name = {
.name = "xpc"
};
-struct device xpc_part_dbg_subname = {
+static struct device xpc_part_dbg_subname = {
.init_name = "", /* set to "part" at xpc_init() time */
.driver = &xpc_dbg_name
};
-struct device xpc_chan_dbg_subname = {
+static struct device xpc_chan_dbg_subname = {
.init_name = "", /* set to "chan" at xpc_init() time */
.driver = &xpc_dbg_name
};
@@ -1217,7 +1217,7 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *_die_args)
return NOTIFY_DONE;
}
-int __init
+static int __init
xpc_init(void)
{
int ret;
@@ -1319,7 +1319,7 @@ out_1:
module_init(xpc_init);
-void __exit
+static void __exit
xpc_exit(void)
{
xpc_do_exit(xpUnloading);
diff --git a/drivers/misc/sgi-xp/xpnet.c b/drivers/misc/sgi-xp/xpnet.c
index ada94e6a3c91..837d6c3fe69c 100644
--- a/drivers/misc/sgi-xp/xpnet.c
+++ b/drivers/misc/sgi-xp/xpnet.c
@@ -96,7 +96,7 @@ struct xpnet_pending_msg {
atomic_t use_count;
};
-struct net_device *xpnet_device;
+static struct net_device *xpnet_device;
/*
* When we are notified of other partitions activating, we add them to
@@ -131,16 +131,16 @@ static DEFINE_SPINLOCK(xpnet_broadcast_lock);
/* Define the XPNET debug device structures to be used with dev_dbg() et al */
-struct device_driver xpnet_dbg_name = {
+static struct device_driver xpnet_dbg_name = {
.name = "xpnet"
};
-struct device xpnet_dbg_subname = {
+static struct device xpnet_dbg_subname = {
.init_name = "", /* set to "" */
.driver = &xpnet_dbg_name
};
-struct device *xpnet = &xpnet_dbg_subname;
+static struct device *xpnet = &xpnet_dbg_subname;
/*
* Packet was recevied by XPC and forwarded to us.
diff --git a/drivers/misc/xilinx_sdfec.c b/drivers/misc/xilinx_sdfec.c
index 71bbaa56bdb5..7a7589417e87 100644
--- a/drivers/misc/xilinx_sdfec.c
+++ b/drivers/misc/xilinx_sdfec.c
@@ -649,14 +649,9 @@ static int xsdfec_add_ldpc(struct xsdfec_dev *xsdfec, void __user *arg)
struct xsdfec_ldpc_params *ldpc;
int ret, n;
- ldpc = kzalloc(sizeof(*ldpc), GFP_KERNEL);
- if (!ldpc)
- return -ENOMEM;
-
- if (copy_from_user(ldpc, arg, sizeof(*ldpc))) {
- ret = -EFAULT;
- goto err_out;
- }
+ ldpc = memdup_user(arg, sizeof(*ldpc));
+ if (IS_ERR(ldpc))
+ return PTR_ERR(ldpc);
if (xsdfec->config.code == XSDFEC_TURBO_CODE) {
ret = -EIO;
diff --git a/drivers/mmc/host/sdhci-of-arasan.c b/drivers/mmc/host/sdhci-of-arasan.c
index d4905c106c06..d01f76223387 100644
--- a/drivers/mmc/host/sdhci-of-arasan.c
+++ b/drivers/mmc/host/sdhci-of-arasan.c
@@ -98,10 +98,6 @@ struct sdhci_arasan_clk_data {
void *clk_of_data;
};
-struct sdhci_arasan_zynqmp_clk_data {
- const struct zynqmp_eemi_ops *eemi_ops;
-};
-
/**
* struct sdhci_arasan_data
* @host: Pointer to the main SDHCI host structure.
@@ -630,9 +626,6 @@ static int sdhci_zynqmp_sdcardclk_set_phase(struct clk_hw *hw, int degrees)
struct sdhci_arasan_data *sdhci_arasan =
container_of(clk_data, struct sdhci_arasan_data, clk_data);
struct sdhci_host *host = sdhci_arasan->host;
- struct sdhci_arasan_zynqmp_clk_data *zynqmp_clk_data =
- clk_data->clk_of_data;
- const struct zynqmp_eemi_ops *eemi_ops = zynqmp_clk_data->eemi_ops;
const char *clk_name = clk_hw_get_name(hw);
u32 node_id = !strcmp(clk_name, "clk_out_sd0") ? NODE_SD_0 : NODE_SD_1;
u8 tap_delay, tap_max = 0;
@@ -672,8 +665,7 @@ static int sdhci_zynqmp_sdcardclk_set_phase(struct clk_hw *hw, int degrees)
tap_delay = (degrees * tap_max) / 360;
/* Set the Clock Phase */
- ret = eemi_ops->ioctl(node_id, IOCTL_SET_SD_TAPDELAY,
- PM_TAPDELAY_OUTPUT, tap_delay, NULL);
+ ret = zynqmp_pm_set_sd_tapdelay(node_id, PM_TAPDELAY_OUTPUT, tap_delay);
if (ret)
pr_err("Error setting Output Tap Delay\n");
@@ -702,9 +694,6 @@ static int sdhci_zynqmp_sampleclk_set_phase(struct clk_hw *hw, int degrees)
struct sdhci_arasan_data *sdhci_arasan =
container_of(clk_data, struct sdhci_arasan_data, clk_data);
struct sdhci_host *host = sdhci_arasan->host;
- struct sdhci_arasan_zynqmp_clk_data *zynqmp_clk_data =
- clk_data->clk_of_data;
- const struct zynqmp_eemi_ops *eemi_ops = zynqmp_clk_data->eemi_ops;
const char *clk_name = clk_hw_get_name(hw);
u32 node_id = !strcmp(clk_name, "clk_in_sd0") ? NODE_SD_0 : NODE_SD_1;
u8 tap_delay, tap_max = 0;
@@ -744,8 +733,7 @@ static int sdhci_zynqmp_sampleclk_set_phase(struct clk_hw *hw, int degrees)
tap_delay = (degrees * tap_max) / 360;
/* Set the Clock Phase */
- ret = eemi_ops->ioctl(node_id, IOCTL_SET_SD_TAPDELAY,
- PM_TAPDELAY_INPUT, tap_delay, NULL);
+ ret = zynqmp_pm_set_sd_tapdelay(node_id, PM_TAPDELAY_INPUT, tap_delay);
if (ret)
pr_err("Error setting Input Tap Delay\n");
@@ -759,11 +747,6 @@ static const struct clk_ops zynqmp_sampleclk_ops = {
static void arasan_zynqmp_dll_reset(struct sdhci_host *host, u32 deviceid)
{
- struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct sdhci_arasan_data *sdhci_arasan = sdhci_pltfm_priv(pltfm_host);
- struct sdhci_arasan_zynqmp_clk_data *zynqmp_clk_data =
- sdhci_arasan->clk_data.clk_of_data;
- const struct zynqmp_eemi_ops *eemi_ops = zynqmp_clk_data->eemi_ops;
u16 clk;
clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
@@ -771,8 +754,7 @@ static void arasan_zynqmp_dll_reset(struct sdhci_host *host, u32 deviceid)
sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
/* Issue DLL Reset */
- eemi_ops->ioctl(deviceid, IOCTL_SD_DLL_RESET,
- PM_DLL_RESET_PULSE, 0, NULL);
+ zynqmp_pm_sd_dll_reset(deviceid, PM_DLL_RESET_PULSE);
clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
@@ -1277,20 +1259,6 @@ static int sdhci_arasan_probe(struct platform_device *pdev)
goto clk_disable_all;
if (of_device_is_compatible(np, "xlnx,zynqmp-8.9a")) {
- struct sdhci_arasan_zynqmp_clk_data *zynqmp_clk_data;
- const struct zynqmp_eemi_ops *eemi_ops;
-
- zynqmp_clk_data = devm_kzalloc(&pdev->dev,
- sizeof(*zynqmp_clk_data),
- GFP_KERNEL);
- eemi_ops = zynqmp_pm_get_eemi_ops();
- if (IS_ERR(eemi_ops)) {
- ret = PTR_ERR(eemi_ops);
- goto unreg_clk;
- }
-
- zynqmp_clk_data->eemi_ops = eemi_ops;
- sdhci_arasan->clk_data.clk_of_data = zynqmp_clk_data;
host->mmc_host_ops.execute_tuning =
arasan_zynqmp_execute_tuning;
}
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index 05c6ae4b0b97..ad6e55a75bdb 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -167,11 +167,8 @@ static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj,
return count;
}
-static umode_t nvmem_bin_attr_is_visible(struct kobject *kobj,
- struct bin_attribute *attr, int i)
+static umode_t nvmem_bin_attr_get_umode(struct nvmem_device *nvmem)
{
- struct device *dev = container_of(kobj, struct device, kobj);
- struct nvmem_device *nvmem = to_nvmem_device(dev);
umode_t mode = 0400;
if (!nvmem->root_only)
@@ -189,6 +186,15 @@ static umode_t nvmem_bin_attr_is_visible(struct kobject *kobj,
return mode;
}
+static umode_t nvmem_bin_attr_is_visible(struct kobject *kobj,
+ struct bin_attribute *attr, int i)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct nvmem_device *nvmem = to_nvmem_device(dev);
+
+ return nvmem_bin_attr_get_umode(nvmem);
+}
+
/* default read/write permissions */
static struct bin_attribute bin_attr_rw_nvmem = {
.attr = {
@@ -215,34 +221,14 @@ static const struct attribute_group *nvmem_dev_groups[] = {
NULL,
};
-/* read only permission */
-static struct bin_attribute bin_attr_ro_nvmem = {
+static struct bin_attribute bin_attr_nvmem_eeprom_compat = {
.attr = {
- .name = "nvmem",
- .mode = 0444,
- },
- .read = bin_attr_nvmem_read,
-};
-
-/* default read/write permissions, root only */
-static struct bin_attribute bin_attr_rw_root_nvmem = {
- .attr = {
- .name = "nvmem",
- .mode = 0600,
+ .name = "eeprom",
},
.read = bin_attr_nvmem_read,
.write = bin_attr_nvmem_write,
};
-/* read only permission, root only */
-static struct bin_attribute bin_attr_ro_root_nvmem = {
- .attr = {
- .name = "nvmem",
- .mode = 0400,
- },
- .read = bin_attr_nvmem_read,
-};
-
/*
* nvmem_setup_compat() - Create an additional binary entry in
* drivers sys directory, to be backwards compatible with the older
@@ -259,18 +245,8 @@ static int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem,
if (!config->base_dev)
return -EINVAL;
- if (nvmem->read_only) {
- if (config->root_only)
- nvmem->eeprom = bin_attr_ro_root_nvmem;
- else
- nvmem->eeprom = bin_attr_ro_nvmem;
- } else {
- if (config->root_only)
- nvmem->eeprom = bin_attr_rw_root_nvmem;
- else
- nvmem->eeprom = bin_attr_rw_nvmem;
- }
- nvmem->eeprom.attr.name = "eeprom";
+ nvmem->eeprom = bin_attr_nvmem_eeprom_compat;
+ nvmem->eeprom.attr.mode = nvmem_bin_attr_get_umode(nvmem);
nvmem->eeprom.size = nvmem->size;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
nvmem->eeprom.attr.key = &eeprom_lock_key;
diff --git a/drivers/nvmem/zynqmp_nvmem.c b/drivers/nvmem/zynqmp_nvmem.c
index 5893543918c8..e28d7b133e11 100644
--- a/drivers/nvmem/zynqmp_nvmem.c
+++ b/drivers/nvmem/zynqmp_nvmem.c
@@ -16,8 +16,6 @@ struct zynqmp_nvmem_data {
struct nvmem_device *nvmem;
};
-static const struct zynqmp_eemi_ops *eemi_ops;
-
static int zynqmp_nvmem_read(void *context, unsigned int offset,
void *val, size_t bytes)
{
@@ -25,10 +23,7 @@ static int zynqmp_nvmem_read(void *context, unsigned int offset,
int idcode, version;
struct zynqmp_nvmem_data *priv = context;
- if (!eemi_ops->get_chipid)
- return -ENXIO;
-
- ret = eemi_ops->get_chipid(&idcode, &version);
+ ret = zynqmp_pm_get_chipid(&idcode, &version);
if (ret < 0)
return ret;
@@ -61,10 +56,6 @@ static int zynqmp_nvmem_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- eemi_ops = zynqmp_pm_get_eemi_ops();
- if (IS_ERR(eemi_ops))
- return PTR_ERR(eemi_ops);
-
priv->dev = dev;
econfig.dev = dev;
econfig.reg_read = zynqmp_nvmem_read;
diff --git a/drivers/parport/daisy.c b/drivers/parport/daisy.c
index 3b00e2c8e2e9..6d78ec3a762f 100644
--- a/drivers/parport/daisy.c
+++ b/drivers/parport/daisy.c
@@ -30,12 +30,6 @@
#undef DEBUG
-#ifdef DEBUG
-#define DPRINTK(stuff...) printk(stuff)
-#else
-#define DPRINTK(stuff...)
-#endif
-
static struct daisydev {
struct daisydev *next;
struct parport *port;
@@ -145,8 +139,7 @@ again:
((num_ports = num_mux_ports(port)) == 2 || num_ports == 4)) {
/* Leave original as port zero. */
port->muxport = 0;
- printk(KERN_INFO
- "%s: 1st (default) port of %d-way multiplexor\n",
+ pr_info("%s: 1st (default) port of %d-way multiplexor\n",
port->name, num_ports);
for (i = 1; i < num_ports; i++) {
/* Clone the port. */
@@ -159,8 +152,7 @@ again:
continue;
}
- printk(KERN_INFO
- "%s: %d%s port of %d-way multiplexor on %s\n",
+ pr_info("%s: %d%s port of %d-way multiplexor on %s\n",
extra->name, i + 1, th[i + 1], num_ports,
port->name);
@@ -323,8 +315,7 @@ static int cpp_daisy(struct parport *port, int cmd)
| PARPORT_STATUS_PAPEROUT
| PARPORT_STATUS_SELECT
| PARPORT_STATUS_ERROR)) {
- DPRINTK(KERN_DEBUG "%s: cpp_daisy: aa5500ff(%02x)\n",
- port->name, s);
+ pr_debug("%s: cpp_daisy: aa5500ff(%02x)\n", port->name, s);
return -ENXIO;
}
@@ -334,8 +325,7 @@ static int cpp_daisy(struct parport *port, int cmd)
| PARPORT_STATUS_SELECT
| PARPORT_STATUS_ERROR);
if (s != (PARPORT_STATUS_SELECT | PARPORT_STATUS_ERROR)) {
- DPRINTK(KERN_DEBUG "%s: cpp_daisy: aa5500ff87(%02x)\n",
- port->name, s);
+ pr_debug("%s: cpp_daisy: aa5500ff87(%02x)\n", port->name, s);
return -ENXIO;
}
@@ -370,7 +360,7 @@ static int cpp_mux(struct parport *port, int cmd)
s = parport_read_status(port);
if (!(s & PARPORT_STATUS_ACK)) {
- DPRINTK(KERN_DEBUG "%s: cpp_mux: aa55f00f52ad%02x(%02x)\n",
+ pr_debug("%s: cpp_mux: aa55f00f52ad%02x(%02x)\n",
port->name, cmd, s);
return -EIO;
}
@@ -456,8 +446,7 @@ static int assign_addrs(struct parport *port)
| PARPORT_STATUS_PAPEROUT
| PARPORT_STATUS_SELECT
| PARPORT_STATUS_ERROR)) {
- DPRINTK(KERN_DEBUG "%s: assign_addrs: aa5500ff(%02x)\n",
- port->name, s);
+ pr_debug("%s: assign_addrs: aa5500ff(%02x)\n", port->name, s);
return 0;
}
@@ -467,8 +456,7 @@ static int assign_addrs(struct parport *port)
| PARPORT_STATUS_SELECT
| PARPORT_STATUS_ERROR);
if (s != (PARPORT_STATUS_SELECT | PARPORT_STATUS_ERROR)) {
- DPRINTK(KERN_DEBUG "%s: assign_addrs: aa5500ff87(%02x)\n",
- port->name, s);
+ pr_debug("%s: assign_addrs: aa5500ff87(%02x)\n", port->name, s);
return 0;
}
@@ -505,8 +493,7 @@ static int assign_addrs(struct parport *port)
parport_write_data(port, 0xff); udelay(2);
detected = numdevs - thisdev;
- DPRINTK(KERN_DEBUG "%s: Found %d daisy-chained devices\n", port->name,
- detected);
+ pr_debug("%s: Found %d daisy-chained devices\n", port->name, detected);
/* Ask the new devices to introduce themselves. */
deviceid = kmalloc(1024, GFP_KERNEL);
diff --git a/drivers/parport/ieee1284.c b/drivers/parport/ieee1284.c
index 90fb73575495..f28d6a3c5a68 100644
--- a/drivers/parport/ieee1284.c
+++ b/drivers/parport/ieee1284.c
@@ -31,12 +31,6 @@
#undef DEBUG /* Don't want a garbled console */
#endif
-#ifdef DEBUG
-#define DPRINTK(stuff...) printk (stuff)
-#else
-#define DPRINTK(stuff...)
-#endif
-
/* Make parport_wait_peripheral wake up.
* It will be useful to call this from an interrupt handler. */
static void parport_ieee1284_wakeup (struct parport *port)
@@ -258,12 +252,11 @@ static void parport_ieee1284_terminate (struct parport *port)
PARPORT_STATUS_PAPEROUT,
PARPORT_STATUS_PAPEROUT);
if (r)
- DPRINTK (KERN_INFO "%s: Timeout at event 49\n",
+ pr_debug("%s: Timeout at event 49\n",
port->name);
parport_data_forward (port);
- DPRINTK (KERN_DEBUG "%s: ECP direction: forward\n",
- port->name);
+ pr_debug("%s: ECP direction: forward\n", port->name);
port->ieee1284.phase = IEEE1284_PH_FWD_IDLE;
}
@@ -281,8 +274,7 @@ static void parport_ieee1284_terminate (struct parport *port)
/* Event 24: nAck goes low */
r = parport_wait_peripheral (port, PARPORT_STATUS_ACK, 0);
if (r)
- DPRINTK (KERN_INFO "%s: Timeout at event 24\n",
- port->name);
+ pr_debug("%s: Timeout at event 24\n", port->name);
/* Event 25: Set nAutoFd low */
parport_frob_control (port,
@@ -294,8 +286,7 @@ static void parport_ieee1284_terminate (struct parport *port)
PARPORT_STATUS_ACK,
PARPORT_STATUS_ACK);
if (r)
- DPRINTK (KERN_INFO "%s: Timeout at event 27\n",
- port->name);
+ pr_debug("%s: Timeout at event 27\n", port->name);
/* Event 29: Set nAutoFd high */
parport_frob_control (port, PARPORT_CONTROL_AUTOFD, 0);
@@ -304,8 +295,7 @@ static void parport_ieee1284_terminate (struct parport *port)
port->ieee1284.mode = IEEE1284_MODE_COMPAT;
port->ieee1284.phase = IEEE1284_PH_FWD_IDLE;
- DPRINTK (KERN_DEBUG "%s: In compatibility (forward idle) mode\n",
- port->name);
+ pr_debug("%s: In compatibility (forward idle) mode\n", port->name);
}
#endif /* IEEE1284 support */
@@ -329,7 +319,7 @@ int parport_negotiate (struct parport *port, int mode)
#ifndef CONFIG_PARPORT_1284
if (mode == IEEE1284_MODE_COMPAT)
return 0;
- printk (KERN_ERR "parport: IEEE1284 not supported in this kernel\n");
+ pr_err("parport: IEEE1284 not supported in this kernel\n");
return -1;
#else
int m = mode & ~IEEE1284_ADDR;
@@ -406,8 +396,7 @@ int parport_negotiate (struct parport *port, int mode)
PARPORT_CONTROL_SELECT
| PARPORT_CONTROL_AUTOFD,
PARPORT_CONTROL_SELECT);
- DPRINTK (KERN_DEBUG
- "%s: Peripheral not IEEE1284 compliant (0x%02X)\n",
+ pr_debug("%s: Peripheral not IEEE1284 compliant (0x%02X)\n",
port->name, parport_read_status (port));
port->ieee1284.phase = IEEE1284_PH_FWD_IDLE;
return -1; /* Not IEEE1284 compliant */
@@ -430,8 +419,7 @@ int parport_negotiate (struct parport *port, int mode)
PARPORT_STATUS_ACK,
PARPORT_STATUS_ACK)) {
/* This shouldn't really happen with a compliant device. */
- DPRINTK (KERN_DEBUG
- "%s: Mode 0x%02x not supported? (0x%02x)\n",
+ pr_debug("%s: Mode 0x%02x not supported? (0x%02x)\n",
port->name, mode, port->ops->read_status (port));
parport_ieee1284_terminate (port);
return 1;
@@ -442,7 +430,7 @@ int parport_negotiate (struct parport *port, int mode)
/* xflag should be high for all modes other than nibble (0). */
if (mode && !xflag) {
/* Mode not supported. */
- DPRINTK (KERN_DEBUG "%s: Mode 0x%02x rejected by peripheral\n",
+ pr_debug("%s: Mode 0x%02x rejected by peripheral\n",
port->name, mode);
parport_ieee1284_terminate (port);
return 1;
@@ -463,9 +451,7 @@ int parport_negotiate (struct parport *port, int mode)
/* Event 52: nAck goes low */
if (parport_wait_peripheral (port, PARPORT_STATUS_ACK, 0)) {
/* This peripheral is _very_ slow. */
- DPRINTK (KERN_DEBUG
- "%s: Event 52 didn't happen\n",
- port->name);
+ pr_debug("%s: Event 52 didn't happen\n", port->name);
parport_ieee1284_terminate (port);
return 1;
}
@@ -481,10 +467,9 @@ int parport_negotiate (struct parport *port, int mode)
PARPORT_STATUS_ACK)) {
/* This shouldn't really happen with a compliant
* device. */
- DPRINTK (KERN_DEBUG
- "%s: Mode 0x%02x not supported? (0x%02x)\n",
+ pr_debug("%s: Mode 0x%02x not supported? (0x%02x)\n",
port->name, mode,
- port->ops->read_status (port));
+ port->ops->read_status(port));
parport_ieee1284_terminate (port);
return 1;
}
@@ -495,8 +480,8 @@ int parport_negotiate (struct parport *port, int mode)
/* xflag should be high. */
if (!xflag) {
/* Extended mode not supported. */
- DPRINTK (KERN_DEBUG "%s: Extended mode 0x%02x not "
- "supported\n", port->name, mode);
+ pr_debug("%s: Extended mode 0x%02x not supported\n",
+ port->name, mode);
parport_ieee1284_terminate (port);
return 1;
}
@@ -505,7 +490,7 @@ int parport_negotiate (struct parport *port, int mode)
}
/* Mode is supported */
- DPRINTK (KERN_DEBUG "%s: In mode 0x%02x\n", port->name, mode);
+ pr_debug("%s: In mode 0x%02x\n", port->name, mode);
port->ieee1284.mode = mode;
/* But ECP is special */
@@ -522,13 +507,11 @@ int parport_negotiate (struct parport *port, int mode)
PARPORT_STATUS_PAPEROUT,
PARPORT_STATUS_PAPEROUT);
if (r) {
- DPRINTK (KERN_INFO "%s: Timeout at event 31\n",
- port->name);
+ pr_debug("%s: Timeout at event 31\n", port->name);
}
port->ieee1284.phase = IEEE1284_PH_FWD_IDLE;
- DPRINTK (KERN_DEBUG "%s: ECP direction: forward\n",
- port->name);
+ pr_debug("%s: ECP direction: forward\n", port->name);
} else switch (mode) {
case IEEE1284_MODE_NIBBLE:
case IEEE1284_MODE_BYTE:
@@ -573,7 +556,7 @@ void parport_ieee1284_interrupt (void *handle)
if (port->ieee1284.phase == IEEE1284_PH_REV_IDLE) {
/* An interrupt in this phase means that data
* is now available. */
- DPRINTK (KERN_DEBUG "%s: Data available\n", port->name);
+ pr_debug("%s: Data available\n", port->name);
parport_ieee1284_ack_data_avail (port);
}
#endif /* IEEE1284 support */
@@ -617,13 +600,12 @@ ssize_t parport_write (struct parport *port, const void *buffer, size_t len)
parport_negotiate (port, IEEE1284_MODE_COMPAT);
/* fall through */
case IEEE1284_MODE_COMPAT:
- DPRINTK (KERN_DEBUG "%s: Using compatibility mode\n",
- port->name);
+ pr_debug("%s: Using compatibility mode\n", port->name);
fn = port->ops->compat_write_data;
break;
case IEEE1284_MODE_EPP:
- DPRINTK (KERN_DEBUG "%s: Using EPP mode\n", port->name);
+ pr_debug("%s: Using EPP mode\n", port->name);
if (addr) {
fn = port->ops->epp_write_addr;
} else {
@@ -631,8 +613,7 @@ ssize_t parport_write (struct parport *port, const void *buffer, size_t len)
}
break;
case IEEE1284_MODE_EPPSWE:
- DPRINTK (KERN_DEBUG "%s: Using software-emulated EPP mode\n",
- port->name);
+ pr_debug("%s: Using software-emulated EPP mode\n", port->name);
if (addr) {
fn = parport_ieee1284_epp_write_addr;
} else {
@@ -641,7 +622,7 @@ ssize_t parport_write (struct parport *port, const void *buffer, size_t len)
break;
case IEEE1284_MODE_ECP:
case IEEE1284_MODE_ECPRLE:
- DPRINTK (KERN_DEBUG "%s: Using ECP mode\n", port->name);
+ pr_debug("%s: Using ECP mode\n", port->name);
if (addr) {
fn = port->ops->ecp_write_addr;
} else {
@@ -650,8 +631,7 @@ ssize_t parport_write (struct parport *port, const void *buffer, size_t len)
break;
case IEEE1284_MODE_ECPSWE:
- DPRINTK (KERN_DEBUG "%s: Using software-emulated ECP mode\n",
- port->name);
+ pr_debug("%s: Using software-emulated ECP mode\n", port->name);
/* The caller has specified that it must be emulated,
* even if we have ECP hardware! */
if (addr) {
@@ -662,13 +642,13 @@ ssize_t parport_write (struct parport *port, const void *buffer, size_t len)
break;
default:
- DPRINTK (KERN_DEBUG "%s: Unknown mode 0x%02x\n", port->name,
- port->ieee1284.mode);
+ pr_debug("%s: Unknown mode 0x%02x\n",
+ port->name, port->ieee1284.mode);
return -ENOSYS;
}
retval = (*fn) (port, buffer, len, 0);
- DPRINTK (KERN_DEBUG "%s: wrote %d/%d bytes\n", port->name, retval, len);
+ pr_debug("%s: wrote %zd/%zu bytes\n", port->name, retval, len);
return retval;
#endif /* IEEE1284 support */
}
@@ -694,7 +674,7 @@ ssize_t parport_write (struct parport *port, const void *buffer, size_t len)
ssize_t parport_read (struct parport *port, void *buffer, size_t len)
{
#ifndef CONFIG_PARPORT_1284
- printk (KERN_ERR "parport: IEEE1284 not supported in this kernel\n");
+ pr_err("parport: IEEE1284 not supported in this kernel\n");
return -ENODEV;
#else
int mode = port->physport->ieee1284.mode;
@@ -715,7 +695,7 @@ ssize_t parport_read (struct parport *port, void *buffer, size_t len)
if ((port->physport->modes & PARPORT_MODE_TRISTATE) &&
!parport_negotiate (port, IEEE1284_MODE_BYTE)) {
/* got into BYTE mode OK */
- DPRINTK (KERN_DEBUG "%s: Using byte mode\n", port->name);
+ pr_debug("%s: Using byte mode\n", port->name);
fn = port->ops->byte_read_data;
break;
}
@@ -724,17 +704,17 @@ ssize_t parport_read (struct parport *port, void *buffer, size_t len)
}
/* fall through - to NIBBLE */
case IEEE1284_MODE_NIBBLE:
- DPRINTK (KERN_DEBUG "%s: Using nibble mode\n", port->name);
+ pr_debug("%s: Using nibble mode\n", port->name);
fn = port->ops->nibble_read_data;
break;
case IEEE1284_MODE_BYTE:
- DPRINTK (KERN_DEBUG "%s: Using byte mode\n", port->name);
+ pr_debug("%s: Using byte mode\n", port->name);
fn = port->ops->byte_read_data;
break;
case IEEE1284_MODE_EPP:
- DPRINTK (KERN_DEBUG "%s: Using EPP mode\n", port->name);
+ pr_debug("%s: Using EPP mode\n", port->name);
if (addr) {
fn = port->ops->epp_read_addr;
} else {
@@ -742,8 +722,7 @@ ssize_t parport_read (struct parport *port, void *buffer, size_t len)
}
break;
case IEEE1284_MODE_EPPSWE:
- DPRINTK (KERN_DEBUG "%s: Using software-emulated EPP mode\n",
- port->name);
+ pr_debug("%s: Using software-emulated EPP mode\n", port->name);
if (addr) {
fn = parport_ieee1284_epp_read_addr;
} else {
@@ -752,19 +731,18 @@ ssize_t parport_read (struct parport *port, void *buffer, size_t len)
break;
case IEEE1284_MODE_ECP:
case IEEE1284_MODE_ECPRLE:
- DPRINTK (KERN_DEBUG "%s: Using ECP mode\n", port->name);
+ pr_debug("%s: Using ECP mode\n", port->name);
fn = port->ops->ecp_read_data;
break;
case IEEE1284_MODE_ECPSWE:
- DPRINTK (KERN_DEBUG "%s: Using software-emulated ECP mode\n",
- port->name);
+ pr_debug("%s: Using software-emulated ECP mode\n", port->name);
fn = parport_ieee1284_ecp_read_data;
break;
default:
- DPRINTK (KERN_DEBUG "%s: Unknown mode 0x%02x\n", port->name,
- port->physport->ieee1284.mode);
+ pr_debug("%s: Unknown mode 0x%02x\n",
+ port->name, port->physport->ieee1284.mode);
return -ENOSYS;
}
diff --git a/drivers/parport/ieee1284_ops.c b/drivers/parport/ieee1284_ops.c
index 5d41dda6da4e..2c11bd3fe1fd 100644
--- a/drivers/parport/ieee1284_ops.c
+++ b/drivers/parport/ieee1284_ops.c
@@ -27,12 +27,6 @@
#undef DEBUG /* Don't want a garbled console */
#endif
-#ifdef DEBUG
-#define DPRINTK(stuff...) printk (stuff)
-#else
-#define DPRINTK(stuff...)
-#endif
-
/***                                *
 * One-way data transfer functions. *
 *                                ***/
@@ -115,7 +109,7 @@ size_t parport_ieee1284_write_compat (struct parport *port,
if (signal_pending (current))
break;
- DPRINTK (KERN_DEBUG "%s: Timed out\n", port->name);
+ pr_debug("%s: Timed out\n", port->name);
break;
ready:
@@ -178,9 +172,8 @@ size_t parport_ieee1284_read_nibble (struct parport *port,
if (parport_wait_peripheral (port,
PARPORT_STATUS_ACK, 0)) {
/* Timeout -- no more data? */
- DPRINTK (KERN_DEBUG
- "%s: Nibble timeout at event 9 (%d bytes)\n",
- port->name, i/2);
+ pr_debug("%s: Nibble timeout at event 9 (%d bytes)\n",
+ port->name, i / 2);
parport_frob_control (port, PARPORT_CONTROL_AUTOFD, 0);
break;
}
@@ -201,8 +194,7 @@ size_t parport_ieee1284_read_nibble (struct parport *port,
PARPORT_STATUS_ACK,
PARPORT_STATUS_ACK)) {
/* Timeout -- no more data? */
- DPRINTK (KERN_DEBUG
- "%s: Nibble timeout at event 11\n",
+ pr_debug("%s: Nibble timeout at event 11\n",
port->name);
break;
}
@@ -219,9 +211,8 @@ size_t parport_ieee1284_read_nibble (struct parport *port,
/* Read the last nibble without checking data avail. */
if (parport_read_status (port) & PARPORT_STATUS_ERROR) {
end_of_data:
- DPRINTK (KERN_DEBUG
- "%s: No more nibble data (%d bytes)\n",
- port->name, i/2);
+ pr_debug("%s: No more nibble data (%d bytes)\n",
+ port->name, i / 2);
/* Go to reverse idle phase. */
parport_frob_control (port,
@@ -272,8 +263,7 @@ size_t parport_ieee1284_read_byte (struct parport *port,
/* Timeout -- no more data? */
parport_frob_control (port, PARPORT_CONTROL_AUTOFD,
0);
- DPRINTK (KERN_DEBUG "%s: Byte timeout at event 9\n",
- port->name);
+ pr_debug("%s: Byte timeout at event 9\n", port->name);
break;
}
@@ -288,8 +278,7 @@ size_t parport_ieee1284_read_byte (struct parport *port,
PARPORT_STATUS_ACK,
PARPORT_STATUS_ACK)) {
/* Timeout -- no more data? */
- DPRINTK (KERN_DEBUG "%s: Byte timeout at event 11\n",
- port->name);
+ pr_debug("%s: Byte timeout at event 11\n", port->name);
break;
}
@@ -307,8 +296,7 @@ size_t parport_ieee1284_read_byte (struct parport *port,
/* Read the last byte without checking data avail. */
if (parport_read_status (port) & PARPORT_STATUS_ERROR) {
end_of_data:
- DPRINTK (KERN_DEBUG
- "%s: No more byte data (%zd bytes)\n",
+ pr_debug("%s: No more byte data (%zd bytes)\n",
port->name, count);
/* Go to reverse idle phase. */
@@ -353,12 +341,10 @@ int ecp_forward_to_reverse (struct parport *port)
PARPORT_STATUS_PAPEROUT, 0);
if (!retval) {
- DPRINTK (KERN_DEBUG "%s: ECP direction: reverse\n",
- port->name);
+ pr_debug("%s: ECP direction: reverse\n", port->name);
port->ieee1284.phase = IEEE1284_PH_REV_IDLE;
} else {
- DPRINTK (KERN_DEBUG "%s: ECP direction: failed to reverse\n",
- port->name);
+ pr_debug("%s: ECP direction: failed to reverse\n", port->name);
port->ieee1284.phase = IEEE1284_PH_ECP_DIR_UNKNOWN;
}
@@ -384,12 +370,10 @@ int ecp_reverse_to_forward (struct parport *port)
if (!retval) {
parport_data_forward (port);
- DPRINTK (KERN_DEBUG "%s: ECP direction: forward\n",
- port->name);
+ pr_debug("%s: ECP direction: forward\n", port->name);
port->ieee1284.phase = IEEE1284_PH_FWD_IDLE;
} else {
- DPRINTK (KERN_DEBUG
- "%s: ECP direction: failed to switch forward\n",
+ pr_debug("%s: ECP direction: failed to switch forward\n",
port->name);
port->ieee1284.phase = IEEE1284_PH_ECP_DIR_UNKNOWN;
}
@@ -450,7 +434,7 @@ size_t parport_ieee1284_ecp_write_data (struct parport *port,
}
/* Time for Host Transfer Recovery (page 41 of IEEE1284) */
- DPRINTK (KERN_DEBUG "%s: ECP transfer stalled!\n", port->name);
+ pr_debug("%s: ECP transfer stalled!\n", port->name);
parport_frob_control (port, PARPORT_CONTROL_INIT,
PARPORT_CONTROL_INIT);
@@ -466,8 +450,7 @@ size_t parport_ieee1284_ecp_write_data (struct parport *port,
if (!(parport_read_status (port) & PARPORT_STATUS_PAPEROUT))
break;
- DPRINTK (KERN_DEBUG "%s: Host transfer recovered\n",
- port->name);
+ pr_debug("%s: Host transfer recovered\n", port->name);
if (time_after_eq (jiffies, expire)) break;
goto try_again;
@@ -565,23 +548,20 @@ size_t parport_ieee1284_ecp_read_data (struct parport *port,
command or a normal data byte, don't accept it. */
if (command) {
if (byte & 0x80) {
- DPRINTK (KERN_DEBUG "%s: stopping short at "
- "channel command (%02x)\n",
+ pr_debug("%s: stopping short at channel command (%02x)\n",
port->name, byte);
goto out;
}
else if (port->ieee1284.mode != IEEE1284_MODE_ECPRLE)
- DPRINTK (KERN_DEBUG "%s: device illegally "
- "using RLE; accepting anyway\n",
+ pr_debug("%s: device illegally using RLE; accepting anyway\n",
port->name);
rle_count = byte + 1;
/* Are we allowed to read that many bytes? */
if (rle_count > (len - count)) {
- DPRINTK (KERN_DEBUG "%s: leaving %d RLE bytes "
- "for next time\n", port->name,
- rle_count);
+ pr_debug("%s: leaving %d RLE bytes for next time\n",
+ port->name, rle_count);
break;
}
@@ -596,11 +576,10 @@ size_t parport_ieee1284_ecp_read_data (struct parport *port,
PARPORT_STATUS_ACK)) {
/* It's gone wrong. Return what data we have
to the caller. */
- DPRINTK (KERN_DEBUG "ECP read timed out at 45\n");
+ pr_debug("ECP read timed out at 45\n");
if (command)
- printk (KERN_WARNING
- "%s: command ignored (%02x)\n",
+ pr_warn("%s: command ignored (%02x)\n",
port->name, byte);
break;
@@ -620,7 +599,7 @@ size_t parport_ieee1284_ecp_read_data (struct parport *port,
memset (buf, byte, rle_count);
buf += rle_count;
count += rle_count;
- DPRINTK (KERN_DEBUG "%s: decompressed to %d bytes\n",
+ pr_debug("%s: decompressed to %d bytes\n",
port->name, rle_count);
} else {
/* Normal data byte. */
@@ -686,7 +665,7 @@ size_t parport_ieee1284_ecp_write_addr (struct parport *port,
}
/* Time for Host Transfer Recovery (page 41 of IEEE1284) */
- DPRINTK (KERN_DEBUG "%s: ECP transfer stalled!\n", port->name);
+ pr_debug("%s: ECP transfer stalled!\n", port->name);
parport_frob_control (port, PARPORT_CONTROL_INIT,
PARPORT_CONTROL_INIT);
@@ -702,8 +681,7 @@ size_t parport_ieee1284_ecp_write_addr (struct parport *port,
if (!(parport_read_status (port) & PARPORT_STATUS_PAPEROUT))
break;
- DPRINTK (KERN_DEBUG "%s: Host transfer recovered\n",
- port->name);
+ pr_debug("%s: Host transfer recovered\n", port->name);
if (time_after_eq (jiffies, expire)) break;
goto try_again;
diff --git a/drivers/parport/parport_amiga.c b/drivers/parport/parport_amiga.c
index 3301861f69fa..1e88bcfe0d7b 100644
--- a/drivers/parport/parport_amiga.c
+++ b/drivers/parport/parport_amiga.c
@@ -28,16 +28,10 @@
#include <asm/amigaints.h>
#undef DEBUG
-#ifdef DEBUG
-#define DPRINTK printk
-#else
-#define DPRINTK(x...) do { } while (0)
-#endif
-
static void amiga_write_data(struct parport *p, unsigned char data)
{
- DPRINTK(KERN_DEBUG "write_data %c\n",data);
+ pr_debug("write_data %c\n", data);
/* Triggers also /STROBE. This behavior cannot be changed */
ciaa.prb = data;
mb();
@@ -59,13 +53,13 @@ static unsigned char control_amiga_to_pc(unsigned char control)
static void amiga_write_control(struct parport *p, unsigned char control)
{
- DPRINTK(KERN_DEBUG "write_control %02x\n",control);
+ pr_debug("write_control %02x\n", control);
/* No implementation possible */
}
static unsigned char amiga_read_control( struct parport *p)
{
- DPRINTK(KERN_DEBUG "read_control \n");
+ pr_debug("read_control\n");
return control_amiga_to_pc(0);
}
@@ -73,7 +67,7 @@ static unsigned char amiga_frob_control( struct parport *p, unsigned char mask,
{
unsigned char old;
- DPRINTK(KERN_DEBUG "frob_control mask %02x, value %02x\n",mask,val);
+ pr_debug("frob_control mask %02x, value %02x\n", mask, val);
old = amiga_read_control(p);
amiga_write_control(p, (old & ~mask) ^ val);
return old;
@@ -99,7 +93,7 @@ static unsigned char amiga_read_status(struct parport *p)
unsigned char status;
status = status_amiga_to_pc(ciab.pra & 7);
- DPRINTK(KERN_DEBUG "read_status %02x\n", status);
+ pr_debug("read_status %02x\n", status);
return status;
}
@@ -115,14 +109,14 @@ static void amiga_disable_irq(struct parport *p)
static void amiga_data_forward(struct parport *p)
{
- DPRINTK(KERN_DEBUG "forward\n");
+ pr_debug("forward\n");
ciaa.ddrb = 0xff; /* all pins output */
mb();
}
static void amiga_data_reverse(struct parport *p)
{
- DPRINTK(KERN_DEBUG "reverse\n");
+ pr_debug("reverse\n");
ciaa.ddrb = 0; /* all pins input */
mb();
}
@@ -212,7 +206,7 @@ static int __init amiga_parallel_probe(struct platform_device *pdev)
if (err)
goto out_irq;
- printk(KERN_INFO "%s: Amiga built-in port using irq\n", p->name);
+ pr_info("%s: Amiga built-in port using irq\n", p->name);
/* XXX: set operating mode */
parport_announce_port(p);
diff --git a/drivers/parport/parport_atari.c b/drivers/parport/parport_atari.c
index f8dd368bfdbb..2ff0fe053e6e 100644
--- a/drivers/parport/parport_atari.c
+++ b/drivers/parport/parport_atari.c
@@ -200,7 +200,7 @@ static int __init parport_atari_init(void)
}
this_port = p;
- printk(KERN_INFO "%s: Atari built-in port using irq\n", p->name);
+ pr_info("%s: Atari built-in port using irq\n", p->name);
parport_announce_port (p);
return 0;
diff --git a/drivers/parport/parport_cs.c b/drivers/parport/parport_cs.c
index e77044c2bf62..8e7e3ac4bb87 100644
--- a/drivers/parport/parport_cs.c
+++ b/drivers/parport/parport_cs.c
@@ -142,10 +142,8 @@ static int parport_config(struct pcmcia_device *link)
link->irq, PARPORT_DMA_NONE,
&link->dev, IRQF_SHARED);
if (p == NULL) {
- printk(KERN_NOTICE "parport_cs: parport_pc_probe_port() at "
- "0x%3x, irq %u failed\n",
- (unsigned int) link->resource[0]->start,
- link->irq);
+ pr_notice("parport_cs: parport_pc_probe_port() at 0x%3x, irq %u failed\n",
+ (unsigned int)link->resource[0]->start, link->irq);
goto failed;
}
diff --git a/drivers/parport/parport_gsc.c b/drivers/parport/parport_gsc.c
index 922535a118ba..9228e8f90309 100644
--- a/drivers/parport/parport_gsc.c
+++ b/drivers/parport/parport_gsc.c
@@ -238,14 +238,14 @@ struct parport *parport_gsc_probe_port(unsigned long base,
priv = kzalloc (sizeof (struct parport_gsc_private), GFP_KERNEL);
if (!priv) {
- printk (KERN_DEBUG "parport (0x%lx): no memory!\n", base);
+ printk(KERN_DEBUG "parport (0x%lx): no memory!\n", base);
return NULL;
}
ops = kmemdup(&parport_gsc_ops, sizeof(struct parport_operations),
GFP_KERNEL);
if (!ops) {
- printk (KERN_DEBUG "parport (0x%lx): no memory for ops!\n",
- base);
+ printk(KERN_DEBUG "parport (0x%lx): no memory for ops!\n",
+ base);
kfree (priv);
return NULL;
}
@@ -282,7 +282,7 @@ struct parport *parport_gsc_probe_port(unsigned long base,
p->size = (p->modes & PARPORT_MODE_EPP)?8:3;
p->private_data = priv;
- printk(KERN_INFO "%s: PC-style at 0x%lx", p->name, p->base);
+ pr_info("%s: PC-style at 0x%lx", p->name, p->base);
p->irq = irq;
if (p->irq == PARPORT_IRQ_AUTO) {
p->irq = PARPORT_IRQ_NONE;
@@ -299,12 +299,16 @@ struct parport *parport_gsc_probe_port(unsigned long base,
p->dma = PARPORT_DMA_NONE;
pr_cont(" [");
-#define printmode(x) {if(p->modes&PARPORT_MODE_##x){pr_cont("%s%s",f?",":"",#x);f++;}}
+#define printmode(x) \
+do { \
+ if (p->modes & PARPORT_MODE_##x) \
+ pr_cont("%s%s", f++ ? "," : "", #x); \
+} while (0)
{
int f = 0;
printmode(PCSPP);
printmode(TRISTATE);
- printmode(COMPAT)
+ printmode(COMPAT);
printmode(EPP);
// printmode(ECP);
// printmode(DMA);
@@ -315,8 +319,7 @@ struct parport *parport_gsc_probe_port(unsigned long base,
if (p->irq != PARPORT_IRQ_NONE) {
if (request_irq (p->irq, parport_irq_handler,
0, p->name, p)) {
- printk (KERN_WARNING "%s: irq %d in use, "
- "resorting to polled operation\n",
+ pr_warn("%s: irq %d in use, resorting to polled operation\n",
p->name, p->irq);
p->irq = PARPORT_IRQ_NONE;
p->dma = PARPORT_DMA_NONE;
@@ -347,7 +350,7 @@ static int __init parport_init_chip(struct parisc_device *dev)
unsigned long port;
if (!dev->irq) {
- printk(KERN_WARNING "IRQ not found for parallel device at 0x%llx\n",
+ pr_warn("IRQ not found for parallel device at 0x%llx\n",
(unsigned long long)dev->hpa.start);
return -ENODEV;
}
@@ -360,11 +363,11 @@ static int __init parport_init_chip(struct parisc_device *dev)
if (boot_cpu_data.cpu_type > pcxt && !pdc_add_valid(port+4)) {
/* Initialize bidirectional-mode (0x10) & data-transfer-mode #1 (0x20) */
- printk("%s: initialize bidirectional-mode.\n", __func__);
+ pr_info("%s: initialize bidirectional-mode\n", __func__);
parport_writeb ( (0x10 + 0x20), port + 4);
} else {
- printk("%s: enhanced parport-modes not supported.\n", __func__);
+ pr_info("%s: enhanced parport-modes not supported\n", __func__);
}
p = parport_gsc_probe_port(port, 0, dev->irq,
diff --git a/drivers/parport/parport_gsc.h b/drivers/parport/parport_gsc.h
index 4c4d3c6cd77e..9301217edf12 100644
--- a/drivers/parport/parport_gsc.h
+++ b/drivers/parport/parport_gsc.h
@@ -71,7 +71,7 @@ struct parport_gsc_private {
static inline void parport_gsc_write_data(struct parport *p, unsigned char d)
{
#ifdef DEBUG_PARPORT
- printk (KERN_DEBUG "parport_gsc_write_data(%p,0x%02x)\n", p, d);
+ printk(KERN_DEBUG "%s(%p,0x%02x)\n", __func__, p, d);
#endif
parport_writeb(d, DATA(p));
}
@@ -80,8 +80,7 @@ static inline unsigned char parport_gsc_read_data(struct parport *p)
{
unsigned char val = parport_readb (DATA (p));
#ifdef DEBUG_PARPORT
- printk (KERN_DEBUG "parport_gsc_read_data(%p) = 0x%02x\n",
- p, val);
+ printk(KERN_DEBUG "%s(%p) = 0x%02x\n", __func__, p, val);
#endif
return val;
}
@@ -95,9 +94,9 @@ static inline unsigned char __parport_gsc_frob_control(struct parport *p,
struct parport_gsc_private *priv = p->physport->private_data;
unsigned char ctr = priv->ctr;
#ifdef DEBUG_PARPORT
- printk (KERN_DEBUG
- "__parport_gsc_frob_control(%02x,%02x): %02x -> %02x\n",
- mask, val, ctr, ((ctr & ~mask) ^ val) & priv->ctr_writable);
+ printk(KERN_DEBUG "%s(%02x,%02x): %02x -> %02x\n",
+ __func__, mask, val,
+ ctr, ((ctr & ~mask) ^ val) & priv->ctr_writable);
#endif
ctr = (ctr & ~mask) ^ val;
ctr &= priv->ctr_writable; /* only write writable bits. */
@@ -126,8 +125,8 @@ static inline void parport_gsc_write_control(struct parport *p,
/* Take this out when drivers have adapted to newer interface. */
if (d & 0x20) {
- printk (KERN_DEBUG "%s (%s): use data_reverse for this!\n",
- p->name, p->cad->name);
+ printk(KERN_DEBUG "%s (%s): use data_reverse for this!\n",
+ p->name, p->cad->name);
parport_gsc_data_reverse (p);
}
@@ -155,9 +154,9 @@ static inline unsigned char parport_gsc_frob_control(struct parport *p,
/* Take this out when drivers have adapted to newer interface. */
if (mask & 0x20) {
- printk (KERN_DEBUG "%s (%s): use data_%s for this!\n",
- p->name, p->cad->name,
- (val & 0x20) ? "reverse" : "forward");
+ printk(KERN_DEBUG "%s (%s): use data_%s for this!\n",
+ p->name, p->cad->name,
+ (val & 0x20) ? "reverse" : "forward");
if (val & 0x20)
parport_gsc_data_reverse (p);
else
diff --git a/drivers/parport/parport_ip32.c b/drivers/parport/parport_ip32.c
index ab215b650f41..48b084e86dc6 100644
--- a/drivers/parport/parport_ip32.c
+++ b/drivers/parport/parport_ip32.c
@@ -328,19 +328,19 @@ static void parport_ip32_dump_state(struct parport *p, char *str,
"TST", "CFG"};
unsigned int ecr = readb(priv->regs.ecr);
printk(KERN_DEBUG PPIP32 " ecr=0x%02x", ecr);
- printk(" %s",
- ecr_modes[(ecr & ECR_MODE_MASK) >> ECR_MODE_SHIFT]);
+ pr_cont(" %s",
+ ecr_modes[(ecr & ECR_MODE_MASK) >> ECR_MODE_SHIFT]);
if (ecr & ECR_nERRINTR)
- printk(",nErrIntrEn");
+ pr_cont(",nErrIntrEn");
if (ecr & ECR_DMAEN)
- printk(",dmaEn");
+ pr_cont(",dmaEn");
if (ecr & ECR_SERVINTR)
- printk(",serviceIntr");
+ pr_cont(",serviceIntr");
if (ecr & ECR_F_FULL)
- printk(",f_full");
+ pr_cont(",f_full");
if (ecr & ECR_F_EMPTY)
- printk(",f_empty");
- printk("\n");
+ pr_cont(",f_empty");
+ pr_cont("\n");
}
if (show_ecp_config) {
unsigned int oecr, cnfgA, cnfgB;
@@ -352,52 +352,53 @@ static void parport_ip32_dump_state(struct parport *p, char *str,
writeb(ECR_MODE_PS2, priv->regs.ecr);
writeb(oecr, priv->regs.ecr);
printk(KERN_DEBUG PPIP32 " cnfgA=0x%02x", cnfgA);
- printk(" ISA-%s", (cnfgA & CNFGA_IRQ) ? "Level" : "Pulses");
+ pr_cont(" ISA-%s", (cnfgA & CNFGA_IRQ) ? "Level" : "Pulses");
switch (cnfgA & CNFGA_ID_MASK) {
case CNFGA_ID_8:
- printk(",8 bits");
+ pr_cont(",8 bits");
break;
case CNFGA_ID_16:
- printk(",16 bits");
+ pr_cont(",16 bits");
break;
case CNFGA_ID_32:
- printk(",32 bits");
+ pr_cont(",32 bits");
break;
default:
- printk(",unknown ID");
+ pr_cont(",unknown ID");
break;
}
if (!(cnfgA & CNFGA_nBYTEINTRANS))
- printk(",ByteInTrans");
+ pr_cont(",ByteInTrans");
if ((cnfgA & CNFGA_ID_MASK) != CNFGA_ID_8)
- printk(",%d byte%s left", cnfgA & CNFGA_PWORDLEFT,
- ((cnfgA & CNFGA_PWORDLEFT) > 1) ? "s" : "");
- printk("\n");
+ pr_cont(",%d byte%s left",
+ cnfgA & CNFGA_PWORDLEFT,
+ ((cnfgA & CNFGA_PWORDLEFT) > 1) ? "s" : "");
+ pr_cont("\n");
printk(KERN_DEBUG PPIP32 " cnfgB=0x%02x", cnfgB);
- printk(" irq=%u,dma=%u",
- (cnfgB & CNFGB_IRQ_MASK) >> CNFGB_IRQ_SHIFT,
- (cnfgB & CNFGB_DMA_MASK) >> CNFGB_DMA_SHIFT);
- printk(",intrValue=%d", !!(cnfgB & CNFGB_INTRVAL));
+ pr_cont(" irq=%u,dma=%u",
+ (cnfgB & CNFGB_IRQ_MASK) >> CNFGB_IRQ_SHIFT,
+ (cnfgB & CNFGB_DMA_MASK) >> CNFGB_DMA_SHIFT);
+ pr_cont(",intrValue=%d", !!(cnfgB & CNFGB_INTRVAL));
if (cnfgB & CNFGB_COMPRESS)
- printk(",compress");
- printk("\n");
+ pr_cont(",compress");
+ pr_cont("\n");
}
for (i = 0; i < 2; i++) {
unsigned int dcr = i ? priv->dcr_cache : readb(priv->regs.dcr);
printk(KERN_DEBUG PPIP32 " dcr(%s)=0x%02x",
i ? "soft" : "hard", dcr);
- printk(" %s", (dcr & DCR_DIR) ? "rev" : "fwd");
+ pr_cont(" %s", (dcr & DCR_DIR) ? "rev" : "fwd");
if (dcr & DCR_IRQ)
- printk(",ackIntEn");
+ pr_cont(",ackIntEn");
if (!(dcr & DCR_SELECT))
- printk(",nSelectIn");
+ pr_cont(",nSelectIn");
if (dcr & DCR_nINIT)
- printk(",nInit");
+ pr_cont(",nInit");
if (!(dcr & DCR_AUTOFD))
- printk(",nAutoFD");
+ pr_cont(",nAutoFD");
if (!(dcr & DCR_STROBE))
- printk(",nStrobe");
- printk("\n");
+ pr_cont(",nStrobe");
+ pr_cont("\n");
}
#define sep (f++ ? ',' : ' ')
{
@@ -405,20 +406,20 @@ static void parport_ip32_dump_state(struct parport *p, char *str,
unsigned int dsr = readb(priv->regs.dsr);
printk(KERN_DEBUG PPIP32 " dsr=0x%02x", dsr);
if (!(dsr & DSR_nBUSY))
- printk("%cBusy", sep);
+ pr_cont("%cBusy", sep);
if (dsr & DSR_nACK)
- printk("%cnAck", sep);
+ pr_cont("%cnAck", sep);
if (dsr & DSR_PERROR)
- printk("%cPError", sep);
+ pr_cont("%cPError", sep);
if (dsr & DSR_SELECT)
- printk("%cSelect", sep);
+ pr_cont("%cSelect", sep);
if (dsr & DSR_nFAULT)
- printk("%cnFault", sep);
+ pr_cont("%cnFault", sep);
if (!(dsr & DSR_nPRINT))
- printk("%c(Print)", sep);
+ pr_cont("%c(Print)", sep);
if (dsr & DSR_TIMEOUT)
- printk("%cTimeout", sep);
- printk("\n");
+ pr_cont("%cTimeout", sep);
+ pr_cont("\n");
}
#undef sep
}
@@ -1337,9 +1338,8 @@ static unsigned int parport_ip32_fwp_wait_interrupt(struct parport *p)
ecr = parport_ip32_read_econtrol(p);
if ((ecr & ECR_F_EMPTY) && !(ecr & ECR_SERVINTR)
&& !lost_interrupt) {
- printk(KERN_WARNING PPIP32
- "%s: lost interrupt in %s\n",
- p->name, __func__);
+ pr_warn(PPIP32 "%s: lost interrupt in %s\n",
+ p->name, __func__);
lost_interrupt = 1;
}
}
@@ -1643,8 +1643,8 @@ static size_t parport_ip32_compat_write_data(struct parport *p,
DSR_nBUSY | DSR_nFAULT)) {
/* Avoid flooding the logs */
if (ready_before)
- printk(KERN_INFO PPIP32 "%s: not ready in %s\n",
- p->name, __func__);
+ pr_info(PPIP32 "%s: not ready in %s\n",
+ p->name, __func__);
ready_before = 0;
goto stop;
}
@@ -1704,7 +1704,7 @@ static size_t parport_ip32_ecp_write_data(struct parport *p,
/* Event 49: PError goes high. */
if (parport_wait_peripheral(p, DSR_PERROR, DSR_PERROR)) {
- printk(KERN_DEBUG PPIP32 "%s: PError timeout in %s",
+ printk(KERN_DEBUG PPIP32 "%s: PError timeout in %s\n",
p->name, __func__);
physport->ieee1284.phase = IEEE1284_PH_ECP_DIR_UNKNOWN;
return 0;
@@ -1724,8 +1724,8 @@ static size_t parport_ip32_ecp_write_data(struct parport *p,
DSR_nBUSY | DSR_nFAULT)) {
/* Avoid flooding the logs */
if (ready_before)
- printk(KERN_INFO PPIP32 "%s: not ready in %s\n",
- p->name, __func__);
+ pr_info(PPIP32 "%s: not ready in %s\n",
+ p->name, __func__);
ready_before = 0;
goto stop;
}
@@ -2064,8 +2064,7 @@ static __init struct parport *parport_ip32_probe_port(void)
p->modes |= PARPORT_MODE_TRISTATE;
if (!parport_ip32_fifo_supported(p)) {
- printk(KERN_WARNING PPIP32
- "%s: error: FIFO disabled\n", p->name);
+ pr_warn(PPIP32 "%s: error: FIFO disabled\n", p->name);
/* Disable hardware modes depending on a working FIFO. */
features &= ~PARPORT_IP32_ENABLE_SPP;
features &= ~PARPORT_IP32_ENABLE_ECP;
@@ -2077,8 +2076,7 @@ static __init struct parport *parport_ip32_probe_port(void)
if (features & PARPORT_IP32_ENABLE_IRQ) {
int irq = MACEISA_PARALLEL_IRQ;
if (request_irq(irq, parport_ip32_interrupt, 0, p->name, p)) {
- printk(KERN_WARNING PPIP32
- "%s: error: IRQ disabled\n", p->name);
+ pr_warn(PPIP32 "%s: error: IRQ disabled\n", p->name);
/* DMA cannot work without interrupts. */
features &= ~PARPORT_IP32_ENABLE_DMA;
} else {
@@ -2091,8 +2089,7 @@ static __init struct parport *parport_ip32_probe_port(void)
/* Allocate DMA resources */
if (features & PARPORT_IP32_ENABLE_DMA) {
if (parport_ip32_dma_register())
- printk(KERN_WARNING PPIP32
- "%s: error: DMA disabled\n", p->name);
+ pr_warn(PPIP32 "%s: error: DMA disabled\n", p->name);
else {
pr_probe(p, "DMA support enabled\n");
p->dma = 0; /* arbitrary value != PARPORT_DMA_NONE */
@@ -2134,13 +2131,15 @@ static __init struct parport *parport_ip32_probe_port(void)
parport_ip32_dump_state(p, "end init", 0);
/* Print out what we found */
- printk(KERN_INFO "%s: SGI IP32 at 0x%lx (0x%lx)",
- p->name, p->base, p->base_hi);
+ pr_info("%s: SGI IP32 at 0x%lx (0x%lx)", p->name, p->base, p->base_hi);
if (p->irq != PARPORT_IRQ_NONE)
- printk(", irq %d", p->irq);
- printk(" [");
-#define printmode(x) if (p->modes & PARPORT_MODE_##x) \
- printk("%s%s", f++ ? "," : "", #x)
+ pr_cont(", irq %d", p->irq);
+ pr_cont(" [");
+#define printmode(x) \
+do { \
+ if (p->modes & PARPORT_MODE_##x) \
+ pr_cont("%s%s", f++ ? "," : "", #x); \
+} while (0)
{
unsigned int f = 0;
printmode(PCSPP);
@@ -2151,7 +2150,7 @@ static __init struct parport *parport_ip32_probe_port(void)
printmode(DMA);
}
#undef printmode
- printk("]\n");
+ pr_cont("]\n");
parport_announce_port(p);
return p;
diff --git a/drivers/parport/parport_mfc3.c b/drivers/parport/parport_mfc3.c
index 9f87faf939e3..d6bbe8446301 100644
--- a/drivers/parport/parport_mfc3.c
+++ b/drivers/parport/parport_mfc3.c
@@ -70,11 +70,6 @@
#define MAX_MFC 5
#undef DEBUG
-#ifdef DEBUG
-#define DPRINTK printk
-#else
-static inline int DPRINTK(void *nothing, ...) {return 0;}
-#endif
static struct parport *this_port[MAX_MFC] = {NULL, };
static volatile int dummy; /* for trigger reads */
@@ -84,7 +79,7 @@ static struct parport_operations pp_mfc3_ops;
static void mfc3_write_data(struct parport *p, unsigned char data)
{
-DPRINTK(KERN_DEBUG "write_data %c\n",data);
+ pr_debug("write_data %c\n", data);
dummy = pia(p)->pprb; /* clears irq bit */
/* Triggers also /STROBE.*/
@@ -128,13 +123,13 @@ static unsigned char control_mfc3_to_pc(unsigned char control)
static void mfc3_write_control(struct parport *p, unsigned char control)
{
-DPRINTK(KERN_DEBUG "write_control %02x\n",control);
+ pr_debug("write_control %02x\n", control);
pia(p)->ppra = (pia(p)->ppra & 0x1f) | control_pc_to_mfc3(control);
}
static unsigned char mfc3_read_control( struct parport *p)
{
-DPRINTK(KERN_DEBUG "read_control \n");
+ pr_debug("read_control\n");
return control_mfc3_to_pc(pia(p)->ppra & 0xe0);
}
@@ -142,7 +137,7 @@ static unsigned char mfc3_frob_control( struct parport *p, unsigned char mask, u
{
unsigned char old;
-DPRINTK(KERN_DEBUG "frob_control mask %02x, value %02x\n",mask,val);
+ pr_debug("frob_control mask %02x, value %02x\n", mask, val);
old = mfc3_read_control(p);
mfc3_write_control(p, (old & ~mask) ^ val);
return old;
@@ -171,7 +166,7 @@ static unsigned char mfc3_read_status(struct parport *p)
unsigned char status;
status = status_mfc3_to_pc(pia(p)->ppra & 0x1f);
-DPRINTK(KERN_DEBUG "read_status %02x\n", status);
+ pr_debug("read_status %02x\n", status);
return status;
}
@@ -202,7 +197,7 @@ static void mfc3_disable_irq(struct parport *p)
static void mfc3_data_forward(struct parport *p)
{
- DPRINTK(KERN_DEBUG "forward\n");
+ pr_debug("forward\n");
pia(p)->crb &= ~PIA_DDR; /* make data direction register visible */
pia(p)->pddrb = 255; /* all pins output */
pia(p)->crb |= PIA_DDR; /* make data register visible - default */
@@ -210,7 +205,7 @@ static void mfc3_data_forward(struct parport *p)
static void mfc3_data_reverse(struct parport *p)
{
- DPRINTK(KERN_DEBUG "reverse\n");
+ pr_debug("reverse\n");
pia(p)->crb &= ~PIA_DDR; /* make data direction register visible */
pia(p)->pddrb = 0; /* all pins input */
pia(p)->crb |= PIA_DDR; /* make data register visible - default */
@@ -325,7 +320,7 @@ static int __init parport_mfc3_init(void)
p->dev = &z->dev;
this_port[pias++] = p;
- printk(KERN_INFO "%s: Multiface III port using irq\n", p->name);
+ pr_info("%s: Multiface III port using irq\n", p->name);
/* XXX: set operating mode */
p->private_data = (void *)piabase;
diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c
index 1f17a39eabe8..77e37e3cb3a0 100644
--- a/drivers/parport/parport_pc.c
+++ b/drivers/parport/parport_pc.c
@@ -87,13 +87,6 @@
#undef DEBUG
-#ifdef DEBUG
-#define DPRINTK printk
-#else
-#define DPRINTK(stuff...)
-#endif
-
-
#define NR_SUPERIOS 3
static struct superio_struct { /* For Super-IO chips autodetection */
int io;
@@ -118,8 +111,8 @@ static void frob_econtrol(struct parport *pb, unsigned char m,
if (m != 0xff)
ectr = inb(ECONTROL(pb));
- DPRINTK(KERN_DEBUG "frob_econtrol(%02x,%02x): %02x -> %02x\n",
- m, v, ectr, (ectr & ~m) ^ v);
+ pr_debug("frob_econtrol(%02x,%02x): %02x -> %02x\n",
+ m, v, ectr, (ectr & ~m) ^ v);
outb((ectr & ~m) ^ v, ECONTROL(pb));
}
@@ -142,7 +135,7 @@ static int change_mode(struct parport *p, int m)
unsigned char oecr;
int mode;
- DPRINTK(KERN_INFO "parport change_mode ECP-ISA to mode 0x%02x\n", m);
+ pr_debug("parport change_mode ECP-ISA to mode 0x%02x\n", m);
if (!priv->ecr) {
printk(KERN_DEBUG "change_mode: but there's no ECR!\n");
@@ -298,8 +291,8 @@ static size_t parport_pc_epp_read_data(struct parport *port, void *buf,
status = inb(STATUS(port));
if (status & 0x01) {
/* EPP timeout should never occur... */
- printk(KERN_DEBUG
-"%s: EPP timeout occurred while talking to w91284pic (should not have done)\n", port->name);
+ printk(KERN_DEBUG "%s: EPP timeout occurred while talking to w91284pic (should not have done)\n",
+ port->name);
clear_epp_timeout(port);
}
}
@@ -727,7 +720,7 @@ static size_t parport_pc_compat_write_block_pio(struct parport *port,
r = change_mode(port, ECR_PPF); /* Parallel port FIFO */
if (r)
printk(KERN_DEBUG "%s: Warning change_mode ECR_PPF failed\n",
- port->name);
+ port->name);
port->physport->ieee1284.phase = IEEE1284_PH_FWD_DATA;
@@ -770,9 +763,8 @@ static size_t parport_pc_compat_write_block_pio(struct parport *port,
PARPORT_STATUS_BUSY,
PARPORT_STATUS_BUSY);
if (r)
- printk(KERN_DEBUG
- "%s: BUSY timeout (%d) in compat_write_block_pio\n",
- port->name, r);
+ printk(KERN_DEBUG "%s: BUSY timeout (%d) in compat_write_block_pio\n",
+ port->name, r);
port->physport->ieee1284.phase = IEEE1284_PH_FWD_IDLE;
@@ -810,8 +802,8 @@ static size_t parport_pc_ecp_write_block_pio(struct parport *port,
PARPORT_STATUS_PAPEROUT,
PARPORT_STATUS_PAPEROUT);
if (r) {
- printk(KERN_DEBUG "%s: PError timeout (%d) "
- "in ecp_write_block_pio\n", port->name, r);
+ printk(KERN_DEBUG "%s: PError timeout (%d) in ecp_write_block_pio\n",
+ port->name, r);
}
}
@@ -824,7 +816,7 @@ static size_t parport_pc_ecp_write_block_pio(struct parport *port,
r = change_mode(port, ECR_ECP); /* ECP FIFO */
if (r)
printk(KERN_DEBUG "%s: Warning change_mode ECR_ECP failed\n",
- port->name);
+ port->name);
port->physport->ieee1284.phase = IEEE1284_PH_FWD_DATA;
/* Write the data to the FIFO. */
@@ -867,8 +859,8 @@ static size_t parport_pc_ecp_write_block_pio(struct parport *port,
parport_frob_control(port, PARPORT_CONTROL_INIT, 0);
r = parport_wait_peripheral(port, PARPORT_STATUS_PAPEROUT, 0);
if (r)
- printk(KERN_DEBUG "%s: PE,1 timeout (%d) "
- "in ecp_write_block_pio\n", port->name, r);
+ printk(KERN_DEBUG "%s: PE,1 timeout (%d) in ecp_write_block_pio\n",
+ port->name, r);
parport_frob_control(port,
PARPORT_CONTROL_INIT,
@@ -877,17 +869,16 @@ static size_t parport_pc_ecp_write_block_pio(struct parport *port,
PARPORT_STATUS_PAPEROUT,
PARPORT_STATUS_PAPEROUT);
if (r)
- printk(KERN_DEBUG "%s: PE,2 timeout (%d) "
- "in ecp_write_block_pio\n", port->name, r);
+ printk(KERN_DEBUG "%s: PE,2 timeout (%d) in ecp_write_block_pio\n",
+ port->name, r);
}
r = parport_wait_peripheral(port,
PARPORT_STATUS_BUSY,
PARPORT_STATUS_BUSY);
if (r)
- printk(KERN_DEBUG
- "%s: BUSY timeout (%d) in ecp_write_block_pio\n",
- port->name, r);
+ printk(KERN_DEBUG "%s: BUSY timeout (%d) in ecp_write_block_pio\n",
+ port->name, r);
port->physport->ieee1284.phase = IEEE1284_PH_FWD_IDLE;
@@ -982,28 +973,24 @@ static void show_parconfig_smsc37c669(int io, int key)
outb(0xaa, io);
if (verbose_probing) {
- printk(KERN_INFO
- "SMSC 37c669 LPT Config: cr_1=0x%02x, 4=0x%02x, "
- "A=0x%2x, 23=0x%02x, 26=0x%02x, 27=0x%02x\n",
+ pr_info("SMSC 37c669 LPT Config: cr_1=0x%02x, 4=0x%02x, A=0x%2x, 23=0x%02x, 26=0x%02x, 27=0x%02x\n",
cr1, cr4, cra, cr23, cr26, cr27);
/* The documentation calls DMA and IRQ-Lines by letters, so
the board maker can/will wire them
appropriately/randomly... G=reserved H=IDE-irq, */
- printk(KERN_INFO
- "SMSC LPT Config: io=0x%04x, irq=%c, dma=%c, fifo threshold=%d\n",
- cr23 * 4,
- (cr27 & 0x0f) ? 'A' - 1 + (cr27 & 0x0f) : '-',
- (cr26 & 0x0f) ? 'A' - 1 + (cr26 & 0x0f) : '-',
- cra & 0x0f);
- printk(KERN_INFO "SMSC LPT Config: enabled=%s power=%s\n",
- (cr23 * 4 >= 0x100) ? "yes" : "no",
- (cr1 & 4) ? "yes" : "no");
- printk(KERN_INFO
- "SMSC LPT Config: Port mode=%s, EPP version =%s\n",
- (cr1 & 0x08) ? "Standard mode only (SPP)"
- : modes[cr4 & 0x03],
- (cr4 & 0x40) ? "1.7" : "1.9");
+ pr_info("SMSC LPT Config: io=0x%04x, irq=%c, dma=%c, fifo threshold=%d\n",
+ cr23 * 4,
+ (cr27 & 0x0f) ? 'A' - 1 + (cr27 & 0x0f) : '-',
+ (cr26 & 0x0f) ? 'A' - 1 + (cr26 & 0x0f) : '-',
+ cra & 0x0f);
+ pr_info("SMSC LPT Config: enabled=%s power=%s\n",
+ (cr23 * 4 >= 0x100) ? "yes" : "no",
+ (cr1 & 4) ? "yes" : "no");
+ pr_info("SMSC LPT Config: Port mode=%s, EPP version =%s\n",
+ (cr1 & 0x08) ? "Standard mode only (SPP)"
+ : modes[cr4 & 0x03],
+ (cr4 & 0x40) ? "1.7" : "1.9");
}
/* Heuristics ! BIOS setup for this mainboard device limits
@@ -1013,7 +1000,7 @@ static void show_parconfig_smsc37c669(int io, int key)
if (cr23 * 4 >= 0x100) { /* if active */
s = find_free_superio();
if (s == NULL)
- printk(KERN_INFO "Super-IO: too many chips!\n");
+ pr_info("Super-IO: too many chips!\n");
else {
int d;
switch (cr23 * 4) {
@@ -1078,26 +1065,24 @@ static void show_parconfig_winbond(int io, int key)
outb(0xaa, io);
if (verbose_probing) {
- printk(KERN_INFO
- "Winbond LPT Config: cr_30=%02x 60,61=%02x%02x 70=%02x 74=%02x, f0=%02x\n",
- cr30, cr60, cr61, cr70, cr74, crf0);
- printk(KERN_INFO "Winbond LPT Config: active=%s, io=0x%02x%02x irq=%d, ",
- (cr30 & 0x01) ? "yes" : "no", cr60, cr61, cr70 & 0x0f);
+ pr_info("Winbond LPT Config: cr_30=%02x 60,61=%02x%02x 70=%02x 74=%02x, f0=%02x\n",
+ cr30, cr60, cr61, cr70, cr74, crf0);
+ pr_info("Winbond LPT Config: active=%s, io=0x%02x%02x irq=%d, ",
+ (cr30 & 0x01) ? "yes" : "no", cr60, cr61, cr70 & 0x0f);
if ((cr74 & 0x07) > 3)
pr_cont("dma=none\n");
else
pr_cont("dma=%d\n", cr74 & 0x07);
- printk(KERN_INFO
- "Winbond LPT Config: irqtype=%s, ECP fifo threshold=%d\n",
- irqtypes[crf0>>7], (crf0>>3)&0x0f);
- printk(KERN_INFO "Winbond LPT Config: Port mode=%s\n",
- modes[crf0 & 0x07]);
+ pr_info("Winbond LPT Config: irqtype=%s, ECP fifo threshold=%d\n",
+ irqtypes[crf0 >> 7], (crf0 >> 3) & 0x0f);
+ pr_info("Winbond LPT Config: Port mode=%s\n",
+ modes[crf0 & 0x07]);
}
if (cr30 & 0x01) { /* the settings can be interrogated later ... */
s = find_free_superio();
if (s == NULL)
- printk(KERN_INFO "Super-IO: too many chips!\n");
+ pr_info("Super-IO: too many chips!\n");
else {
s->io = (cr60 << 8) | cr61;
s->irq = cr70 & 0x0f;
@@ -1151,9 +1136,8 @@ static void decode_winbond(int efer, int key, int devid, int devrev, int oldid)
progif = 0;
if (verbose_probing)
- printk(KERN_INFO "Winbond chip at EFER=0x%x key=0x%02x "
- "devid=%02x devrev=%02x oldid=%02x type=%s\n",
- efer, key, devid, devrev, oldid, type);
+ pr_info("Winbond chip at EFER=0x%x key=0x%02x devid=%02x devrev=%02x oldid=%02x type=%s\n",
+ efer, key, devid, devrev, oldid, type);
if (progif == 2)
show_parconfig_winbond(efer, key);
@@ -1184,9 +1168,8 @@ static void decode_smsc(int efer, int key, int devid, int devrev)
type = "37c666GT";
if (verbose_probing)
- printk(KERN_INFO "SMSC chip at EFER=0x%x "
- "key=0x%02x devid=%02x devrev=%02x type=%s\n",
- efer, key, devid, devrev, type);
+ pr_info("SMSC chip at EFER=0x%x key=0x%02x devid=%02x devrev=%02x type=%s\n",
+ efer, key, devid, devrev, type);
if (func)
func(efer, key);
@@ -1358,7 +1341,7 @@ static void detect_and_report_it87(void)
dev |= inb(0x2f);
if (dev == 0x8712 || dev == 0x8705 || dev == 0x8715 ||
dev == 0x8716 || dev == 0x8718 || dev == 0x8726) {
- printk(KERN_INFO "IT%04X SuperIO detected.\n", dev);
+ pr_info("IT%04X SuperIO detected\n", dev);
outb(0x07, 0x2E); /* Parallel Port */
outb(0x03, 0x2F);
outb(0xF0, 0x2E); /* BOOT 0x80 off */
@@ -1445,8 +1428,8 @@ static int parport_SPP_supported(struct parport *pb)
if (user_specified)
/* That didn't work, but the user thinks there's a
* port here. */
- printk(KERN_INFO "parport 0x%lx (WARNING): CTR: "
- "wrote 0x%02x, read 0x%02x\n", pb->base, w, r);
+ pr_info("parport 0x%lx (WARNING): CTR: wrote 0x%02x, read 0x%02x\n",
+ pb->base, w, r);
/* Try the data register. The data lines aren't tri-stated at
* this stage, so we expect back what we wrote. */
@@ -1464,10 +1447,9 @@ static int parport_SPP_supported(struct parport *pb)
if (user_specified) {
/* Didn't work, but the user is convinced this is the
* place. */
- printk(KERN_INFO "parport 0x%lx (WARNING): DATA: "
- "wrote 0x%02x, read 0x%02x\n", pb->base, w, r);
- printk(KERN_INFO "parport 0x%lx: You gave this address, "
- "but there is probably no parallel port there!\n",
+ pr_info("parport 0x%lx (WARNING): DATA: wrote 0x%02x, read 0x%02x\n",
+ pb->base, w, r);
+ pr_info("parport 0x%lx: You gave this address, but there is probably no parallel port there!\n",
pb->base);
}
@@ -1620,7 +1602,7 @@ static int parport_ECP_supported(struct parport *pb)
if (i <= priv->fifo_depth) {
if (verbose_probing)
printk(KERN_DEBUG "0x%lx: writeIntrThreshold is %d\n",
- pb->base, i);
+ pb->base, i);
} else
/* Number of bytes we know we can write if we get an
interrupt. */
@@ -1642,7 +1624,7 @@ static int parport_ECP_supported(struct parport *pb)
if (i <= priv->fifo_depth) {
if (verbose_probing)
- printk(KERN_INFO "0x%lx: readIntrThreshold is %d\n",
+ pr_info("0x%lx: readIntrThreshold is %d\n",
pb->base, i);
} else
/* Number of bytes we can read if we get an interrupt. */
@@ -1657,17 +1639,14 @@ static int parport_ECP_supported(struct parport *pb)
switch (pword) {
case 0:
pword = 2;
- printk(KERN_WARNING "0x%lx: Unsupported pword size!\n",
- pb->base);
+ pr_warn("0x%lx: Unsupported pword size!\n", pb->base);
break;
case 2:
pword = 4;
- printk(KERN_WARNING "0x%lx: Unsupported pword size!\n",
- pb->base);
+ pr_warn("0x%lx: Unsupported pword size!\n", pb->base);
break;
default:
- printk(KERN_WARNING "0x%lx: Unknown implementation ID\n",
- pb->base);
+ pr_warn("0x%lx: Unknown implementation ID\n", pb->base);
/* Fall through - Assume 1 */
case 1:
pword = 1;
@@ -1676,14 +1655,14 @@ static int parport_ECP_supported(struct parport *pb)
if (verbose_probing) {
printk(KERN_DEBUG "0x%lx: PWord is %d bits\n",
- pb->base, 8 * pword);
+ pb->base, 8 * pword);
- printk(KERN_DEBUG "0x%lx: Interrupts are ISA-%s\n", pb->base,
- config & 0x80 ? "Level" : "Pulses");
+ printk(KERN_DEBUG "0x%lx: Interrupts are ISA-%s\n",
+ pb->base, config & 0x80 ? "Level" : "Pulses");
configb = inb(CONFIGB(pb));
printk(KERN_DEBUG "0x%lx: ECP port cfgA=0x%02x cfgB=0x%02x\n",
- pb->base, config, configb);
+ pb->base, config, configb);
printk(KERN_DEBUG "0x%lx: ECP settings irq=", pb->base);
if ((configb >> 3) & 0x07)
pr_cont("%d", intrline[(configb >> 3) & 0x07]);
@@ -2107,9 +2086,9 @@ struct parport *parport_pc_probe_port(unsigned long int base,
p->size = (p->modes & PARPORT_MODE_EPP) ? 8 : 3;
- printk(KERN_INFO "%s: PC-style at 0x%lx", p->name, p->base);
+ pr_info("%s: PC-style at 0x%lx", p->name, p->base);
if (p->base_hi && priv->ecr)
- printk(KERN_CONT " (0x%lx)", p->base_hi);
+ pr_cont(" (0x%lx)", p->base_hi);
if (p->irq == PARPORT_IRQ_AUTO) {
p->irq = PARPORT_IRQ_NONE;
parport_irq_probe(p);
@@ -2120,7 +2099,7 @@ struct parport *parport_pc_probe_port(unsigned long int base,
p->irq = PARPORT_IRQ_NONE;
}
if (p->irq != PARPORT_IRQ_NONE) {
- printk(KERN_CONT ", irq %d", p->irq);
+ pr_cont(", irq %d", p->irq);
priv->ctr_writable |= 0x10;
if (p->dma == PARPORT_DMA_AUTO) {
@@ -2144,41 +2123,39 @@ struct parport *parport_pc_probe_port(unsigned long int base,
/* p->ops->ecp_read_data = parport_pc_ecp_read_block_pio; */
#endif /* IEEE 1284 support */
if (p->dma != PARPORT_DMA_NONE) {
- printk(KERN_CONT ", dma %d", p->dma);
+ pr_cont(", dma %d", p->dma);
p->modes |= PARPORT_MODE_DMA;
} else
- printk(KERN_CONT ", using FIFO");
+ pr_cont(", using FIFO");
} else
/* We can't use the DMA channel after all. */
p->dma = PARPORT_DMA_NONE;
#endif /* Allowed to use FIFO/DMA */
- printk(KERN_CONT " [");
+ pr_cont(" [");
-#define printmode(x) \
- {\
- if (p->modes & PARPORT_MODE_##x) {\
- printk(KERN_CONT "%s%s", f ? "," : "", #x);\
- f++;\
- } \
- }
+#define printmode(x) \
+do { \
+ if (p->modes & PARPORT_MODE_##x) \
+ pr_cont("%s%s", f++ ? "," : "", #x); \
+} while (0)
{
int f = 0;
printmode(PCSPP);
printmode(TRISTATE);
- printmode(COMPAT)
+ printmode(COMPAT);
printmode(EPP);
printmode(ECP);
printmode(DMA);
}
#undef printmode
#ifndef CONFIG_PARPORT_1284
- printk(KERN_CONT "(,...)");
+ pr_cont("(,...)");
#endif /* CONFIG_PARPORT_1284 */
- printk(KERN_CONT "]\n");
+ pr_cont("]\n");
if (probedirq != PARPORT_IRQ_NONE)
- printk(KERN_INFO "%s: irq %d detected\n", p->name, probedirq);
+ pr_info("%s: irq %d detected\n", p->name, probedirq);
/* If No ECP release the ports grabbed above. */
if (ECR_res && (p->modes & PARPORT_MODE_ECP) == 0) {
@@ -2193,8 +2170,7 @@ struct parport *parport_pc_probe_port(unsigned long int base,
if (p->irq != PARPORT_IRQ_NONE) {
if (request_irq(p->irq, parport_irq_handler,
irqflags, p->name, p)) {
- printk(KERN_WARNING "%s: irq %d in use, "
- "resorting to polled operation\n",
+ pr_warn("%s: irq %d in use, resorting to polled operation\n",
p->name, p->irq);
p->irq = PARPORT_IRQ_NONE;
p->dma = PARPORT_DMA_NONE;
@@ -2204,8 +2180,7 @@ struct parport *parport_pc_probe_port(unsigned long int base,
#ifdef HAS_DMA
if (p->dma != PARPORT_DMA_NONE) {
if (request_dma(p->dma, p->name)) {
- printk(KERN_WARNING "%s: dma %d in use, "
- "resorting to PIO operation\n",
+ pr_warn("%s: dma %d in use, resorting to PIO operation\n",
p->name, p->dma);
p->dma = PARPORT_DMA_NONE;
} else {
@@ -2215,9 +2190,7 @@ struct parport *parport_pc_probe_port(unsigned long int base,
&priv->dma_handle,
GFP_KERNEL);
if (!priv->dma_buf) {
- printk(KERN_WARNING "%s: "
- "cannot get buffer for DMA, "
- "resorting to PIO operation\n",
+ pr_warn("%s: cannot get buffer for DMA, resorting to PIO operation\n",
p->name);
free_dma(p->dma);
p->dma = PARPORT_DMA_NONE;
@@ -2313,7 +2286,7 @@ static int sio_ite_8872_probe(struct pci_dev *pdev, int autoirq, int autodma,
int irq;
int i;
- DPRINTK(KERN_DEBUG "sio_ite_8872_probe()\n");
+ pr_debug("sio_ite_8872_probe()\n");
/* make sure which one chip */
for (i = 0; i < 5; i++) {
@@ -2330,7 +2303,7 @@ static int sio_ite_8872_probe(struct pci_dev *pdev, int autoirq, int autodma,
}
}
if (i >= 5) {
- printk(KERN_INFO "parport_pc: cannot find ITE8872 INTA\n");
+ pr_info("parport_pc: cannot find ITE8872 INTA\n");
return 0;
}
@@ -2339,29 +2312,28 @@ static int sio_ite_8872_probe(struct pci_dev *pdev, int autoirq, int autodma,
switch (type) {
case 0x2:
- printk(KERN_INFO "parport_pc: ITE8871 found (1P)\n");
+ pr_info("parport_pc: ITE8871 found (1P)\n");
ite8872set = 0x64200000;
break;
case 0xa:
- printk(KERN_INFO "parport_pc: ITE8875 found (1P)\n");
+ pr_info("parport_pc: ITE8875 found (1P)\n");
ite8872set = 0x64200000;
break;
case 0xe:
- printk(KERN_INFO "parport_pc: ITE8872 found (2S1P)\n");
+ pr_info("parport_pc: ITE8872 found (2S1P)\n");
ite8872set = 0x64e00000;
break;
case 0x6:
- printk(KERN_INFO "parport_pc: ITE8873 found (1S)\n");
+ pr_info("parport_pc: ITE8873 found (1S)\n");
release_region(inta_addr[i], 32);
return 0;
case 0x8:
- printk(KERN_INFO "parport_pc: ITE8874 found (2S)\n");
+ pr_info("parport_pc: ITE8874 found (2S)\n");
release_region(inta_addr[i], 32);
return 0;
default:
- printk(KERN_INFO "parport_pc: unknown ITE887x\n");
- printk(KERN_INFO "parport_pc: please mail 'lspci -nvv' "
- "output to Rich.Liu@ite.com.tw\n");
+ pr_info("parport_pc: unknown ITE887x\n");
+ pr_info("parport_pc: please mail 'lspci -nvv' output to Rich.Liu@ite.com.tw\n");
release_region(inta_addr[i], 32);
return 0;
}
@@ -2379,11 +2351,9 @@ static int sio_ite_8872_probe(struct pci_dev *pdev, int autoirq, int autodma,
pci_write_config_dword(pdev, 0x9c,
ite8872set | (ite8872_irq * 0x11111));
- DPRINTK(KERN_DEBUG "ITE887x: The IRQ is %d.\n", ite8872_irq);
- DPRINTK(KERN_DEBUG "ITE887x: The PARALLEL I/O port is 0x%x.\n",
- ite8872_lpt);
- DPRINTK(KERN_DEBUG "ITE887x: The PARALLEL I/O porthi is 0x%x.\n",
- ite8872_lpthi);
+ pr_debug("ITE887x: The IRQ is %d\n", ite8872_irq);
+ pr_debug("ITE887x: The PARALLEL I/O port is 0x%x\n", ite8872_lpt);
+ pr_debug("ITE887x: The PARALLEL I/O porthi is 0x%x\n", ite8872_lpthi);
/* Let the user (or defaults) steer us away from interrupts */
irq = ite8872_irq;
@@ -2396,9 +2366,8 @@ static int sio_ite_8872_probe(struct pci_dev *pdev, int autoirq, int autodma,
release_region(inta_addr[i], 32);
if (parport_pc_probe_port(ite8872_lpt, ite8872_lpthi,
irq, PARPORT_DMA_NONE, &pdev->dev, 0)) {
- printk(KERN_INFO
- "parport_pc: ITE 8872 parallel port: io=0x%X",
- ite8872_lpt);
+ pr_info("parport_pc: ITE 8872 parallel port: io=0x%X",
+ ite8872_lpt);
if (irq != PARPORT_IRQ_NONE)
pr_cont(", irq=%d", irq);
pr_cont("\n");
@@ -2471,8 +2440,7 @@ static int sio_via_probe(struct pci_dev *pdev, int autoirq, int autodma,
have_epp = 1;
break;
default:
- printk(KERN_DEBUG
- "parport_pc: probing current configuration\n");
+ printk(KERN_DEBUG "parport_pc: probing current configuration\n");
siofunc = VIA_FUNCTION_PROBE;
break;
}
@@ -2508,12 +2476,11 @@ static int sio_via_probe(struct pci_dev *pdev, int autoirq, int autodma,
port1 = inb(VIA_CONFIG_DATA) << 2;
printk(KERN_DEBUG "parport_pc: Current parallel port base: 0x%X\n",
- port1);
+ port1);
if (port1 == 0x3BC && have_epp) {
outb(via->viacfg_parport_base, VIA_CONFIG_INDEX);
outb((0x378 >> 2), VIA_CONFIG_DATA);
- printk(KERN_DEBUG
- "parport_pc: Parallel port base changed to 0x378\n");
+ printk(KERN_DEBUG "parport_pc: Parallel port base changed to 0x378\n");
port1 = 0x378;
}
@@ -2525,7 +2492,7 @@ static int sio_via_probe(struct pci_dev *pdev, int autoirq, int autodma,
pci_write_config_byte(pdev, via->via_pci_superio_config_reg, tmp);
if (siofunc == VIA_FUNCTION_PARPORT_DISABLE) {
- printk(KERN_INFO "parport_pc: VIA parallel port disabled in BIOS\n");
+ pr_info("parport_pc: VIA parallel port disabled in BIOS\n");
return 0;
}
@@ -2558,9 +2525,8 @@ static int sio_via_probe(struct pci_dev *pdev, int autoirq, int autodma,
case 0x278:
port2 = 0x678; break;
default:
- printk(KERN_INFO
- "parport_pc: Weird VIA parport base 0x%X, ignoring\n",
- port1);
+ pr_info("parport_pc: Weird VIA parport base 0x%X, ignoring\n",
+ port1);
return 0;
}
@@ -2579,8 +2545,7 @@ static int sio_via_probe(struct pci_dev *pdev, int autoirq, int autodma,
/* finally, do the probe with values obtained */
if (parport_pc_probe_port(port1, port2, irq, dma, &pdev->dev, 0)) {
- printk(KERN_INFO
- "parport_pc: VIA parallel port: io=0x%X", port1);
+ pr_info("parport_pc: VIA parallel port: io=0x%X", port1);
if (irq != PARPORT_IRQ_NONE)
pr_cont(", irq=%d", irq);
if (dma != PARPORT_DMA_NONE)
@@ -2589,7 +2554,7 @@ static int sio_via_probe(struct pci_dev *pdev, int autoirq, int autodma,
return 1;
}
- printk(KERN_WARNING "parport_pc: Strange, can't probe VIA parallel port: io=0x%X, irq=%d, dma=%d\n",
+ pr_warn("parport_pc: Strange, can't probe VIA parallel port: io=0x%X, irq=%d, dma=%d\n",
port1, irq, dma);
return 0;
}
@@ -2854,14 +2819,12 @@ static int parport_pc_pci_probe(struct pci_dev *dev,
/* TODO: test if sharing interrupts works */
irq = dev->irq;
if (irq == IRQ_NONE) {
- printk(KERN_DEBUG
- "PCI parallel port detected: %04x:%04x, I/O at %#lx(%#lx)\n",
- id->vendor, id->device, io_lo, io_hi);
+ printk(KERN_DEBUG "PCI parallel port detected: %04x:%04x, I/O at %#lx(%#lx)\n",
+ id->vendor, id->device, io_lo, io_hi);
irq = PARPORT_IRQ_NONE;
} else {
- printk(KERN_DEBUG
- "PCI parallel port detected: %04x:%04x, I/O at %#lx(%#lx), IRQ %d\n",
- id->vendor, id->device, io_lo, io_hi, irq);
+ printk(KERN_DEBUG "PCI parallel port detected: %04x:%04x, I/O at %#lx(%#lx), IRQ %d\n",
+ id->vendor, id->device, io_lo, io_hi, irq);
}
data->ports[count] =
parport_pc_probe_port(io_lo, io_hi, irq,
@@ -3111,7 +3074,7 @@ static int __init parport_parse_param(const char *s, int *val,
if (ep != s)
*val = r;
else {
- printk(KERN_ERR "parport: bad specifier `%s'\n", s);
+ pr_err("parport: bad specifier `%s'\n", s);
return -1;
}
}
@@ -3133,8 +3096,8 @@ static int __init parport_parse_dma(const char *dmastr, int *val)
#ifdef CONFIG_PCI
static int __init parport_init_mode_setup(char *str)
{
- printk(KERN_DEBUG
- "parport_pc.c: Specified parameter parport_init_mode=%s\n", str);
+ printk(KERN_DEBUG "parport_pc.c: Specified parameter parport_init_mode=%s\n",
+ str);
if (!strcmp(str, "spp"))
parport_init_mode = 1;
@@ -3201,10 +3164,7 @@ static int __init parse_parport_params(void)
irqval[0] = val;
break;
default:
- printk(KERN_WARNING
- "parport_pc: irq specified "
- "without base address. Use 'io=' "
- "to specify one\n");
+ pr_warn("parport_pc: irq specified without base address. Use 'io=' to specify one\n");
}
if (dma[0] && !parport_parse_dma(dma[0], &val))
@@ -3214,10 +3174,7 @@ static int __init parse_parport_params(void)
dmaval[0] = val;
break;
default:
- printk(KERN_WARNING
- "parport_pc: dma specified "
- "without base address. Use 'io=' "
- "to specify one\n");
+ pr_warn("parport_pc: dma specified without base address. Use 'io=' to specify one\n");
}
}
return 0;
@@ -3256,12 +3213,12 @@ static int __init parport_setup(char *str)
val = simple_strtoul(str, &endptr, 0);
if (endptr == str) {
- printk(KERN_WARNING "parport=%s not understood\n", str);
+ pr_warn("parport=%s not understood\n", str);
return 1;
}
if (parport_setup_ptr == PARPORT_PC_MAX_PORTS) {
- printk(KERN_ERR "parport=%s ignored, too many ports\n", str);
+ pr_err("parport=%s ignored, too many ports\n", str);
return 1;
}
diff --git a/drivers/parport/parport_sunbpp.c b/drivers/parport/parport_sunbpp.c
index d5a669b60c27..e840c1b5ab90 100644
--- a/drivers/parport/parport_sunbpp.c
+++ b/drivers/parport/parport_sunbpp.c
@@ -314,7 +314,7 @@ static int bpp_probe(struct platform_device *op)
value_tcr &= ~P_TCR_DIR;
sbus_writeb(value_tcr, &regs->p_tcr);
- printk(KERN_INFO "%s: sunbpp at 0x%lx\n", p->name, p->base);
+ pr_info("%s: sunbpp at 0x%lx\n", p->name, p->base);
dev_set_drvdata(&op->dev, p);
diff --git a/drivers/parport/probe.c b/drivers/parport/probe.c
index e5e6a463a941..7e6d713fa5ac 100644
--- a/drivers/parport/probe.c
+++ b/drivers/parport/probe.c
@@ -38,16 +38,16 @@ static void pretty_print(struct parport *port, int device)
{
struct parport_device_info *info = &port->probe_info[device + 1];
- printk(KERN_INFO "%s", port->name);
+ pr_info("%s", port->name);
if (device >= 0)
- printk (" (addr %d)", device);
+ pr_cont(" (addr %d)", device);
- printk (": %s", classes[info->class].descr);
+ pr_cont(": %s", classes[info->class].descr);
if (info->class)
- printk(", %s %s", info->mfr, info->model);
+ pr_cont(", %s %s", info->mfr, info->model);
- printk("\n");
+ pr_cont("\n");
}
static void parse_data(struct parport *port, int device, char *str)
@@ -58,7 +58,7 @@ static void parse_data(struct parport *port, int device, char *str)
struct parport_device_info *info = &port->probe_info[device + 1];
if (!txt) {
- printk(KERN_WARNING "%s probe: memory squeeze\n", port->name);
+ pr_warn("%s probe: memory squeeze\n", port->name);
return;
}
strcpy(txt, str);
@@ -98,7 +98,8 @@ static void parse_data(struct parport *port, int device, char *str)
goto rock_on;
}
}
- printk(KERN_WARNING "%s probe: warning, class '%s' not understood.\n", port->name, sep);
+ pr_warn("%s probe: warning, class '%s' not understood\n",
+ port->name, sep);
info->class = PARPORT_CLASS_OTHER;
} else if (!strcmp(p, "CMD") ||
!strcmp(p, "COMMAND SET")) {
@@ -177,9 +178,8 @@ static ssize_t parport_read_device_id (struct parport *port, char *buffer,
* just return constant nibble forever. This catches
* also those cases. */
if (idlens[0] == 0 || idlens[0] > 0xFFF) {
- printk (KERN_DEBUG "%s: reported broken Device ID"
- " length of %#zX bytes\n",
- port->name, idlens[0]);
+ printk(KERN_DEBUG "%s: reported broken Device ID length of %#zX bytes\n",
+ port->name, idlens[0]);
return -EIO;
}
numidlens = 2;
@@ -201,10 +201,8 @@ static ssize_t parport_read_device_id (struct parport *port, char *buffer,
if (port->physport->ieee1284.phase != IEEE1284_PH_HBUSY_DAVAIL) {
if (belen != len) {
- printk (KERN_DEBUG "%s: Device ID was %zd bytes"
- " while device told it would be %d"
- " bytes\n",
- port->name, len, belen);
+ printk(KERN_DEBUG "%s: Device ID was %zd bytes while device told it would be %d bytes\n",
+ port->name, len, belen);
}
goto done;
}
@@ -214,11 +212,9 @@ static ssize_t parport_read_device_id (struct parport *port, char *buffer,
* the first 256 bytes or so that we must have read so
* far. */
if (buffer[len-1] == ';') {
- printk (KERN_DEBUG "%s: Device ID reading stopped"
- " before device told data not available. "
- "Current idlen %u of %u, len bytes %02X %02X\n",
- port->name, current_idlen, numidlens,
- length[0], length[1]);
+ printk(KERN_DEBUG "%s: Device ID reading stopped before device told data not available. Current idlen %u of %u, len bytes %02X %02X\n",
+ port->name, current_idlen, numidlens,
+ length[0], length[1]);
goto done;
}
}
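The broken-length check above relies on the IEEE 1284 Device ID framing: the reply begins with a two-byte, most-significant-byte-first length field. A hedged sketch of that decoding (hypothetical helper; the 0/0xFFF bounds simply mirror the sanity check in probe.c):

#include <linux/errno.h>

/* Hypothetical helper: decode the 2-byte MSB-first length that prefixes
 * an IEEE 1284 Device ID reply, rejecting obviously bogus values. */
static int example_device_id_len(const unsigned char length[2])
{
	int len = (length[0] << 8) + length[1];

	if (len == 0 || len > 0xFFF)
		return -EIO;

	return len;
}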
diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
index 48804049d697..e957beb94f14 100644
--- a/drivers/parport/procfs.c
+++ b/drivers/parport/procfs.c
@@ -213,7 +213,11 @@ static int do_hardware_modes(struct ctl_table *table, int write,
return -EACCES;
{
-#define printmode(x) {if(port->modes&PARPORT_MODE_##x){len+=sprintf(buffer+len,"%s%s",f?",":"",#x);f++;}}
+#define printmode(x) \
+do { \
+ if (port->modes & PARPORT_MODE_##x) \
+ len += sprintf(buffer + len, "%s%s", f++ ? "," : "", #x); \
+} while (0)
int f = 0;
printmode(PCSPP);
printmode(TRISTATE);
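The printmode() rewrite wraps a multi-statement macro body in do { ... } while (0) so the expansion is a single statement and stays safe after an un-braced if/else. A standalone illustration of the idiom (hypothetical names, user-space only for brevity):

#include <stdio.h>

#define FLAG_PCSPP	0x1
#define FLAG_TRISTATE	0x2

/* Multi-statement body wrapped in do { } while (0): it expands to exactly
 * one statement, so it is safe after an un-braced if/else and the trailing
 * ';' at the call site parses as expected. */
#define printflag(x)							\
do {									\
	if (flags & FLAG_##x)						\
		len += sprintf(buf + len, "%s%s", f++ ? "," : "", #x);	\
} while (0)

int main(void)
{
	char buf[64];
	int len = 0, f = 0;
	unsigned int flags = FLAG_PCSPP | FLAG_TRISTATE;

	if (flags)		/* un-braced if: still a single statement */
		printflag(PCSPP);
	printflag(TRISTATE);

	printf("%.*s\n", len, buf);	/* prints "PCSPP,TRISTATE" */
	return 0;
}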
diff --git a/drivers/parport/share.c b/drivers/parport/share.c
index d6920ebeabcd..7fec4fefe151 100644
--- a/drivers/parport/share.c
+++ b/drivers/parport/share.c
@@ -278,46 +278,32 @@ static int port_detect(struct device *dev, void *dev_drv)
int __parport_register_driver(struct parport_driver *drv, struct module *owner,
const char *mod_name)
{
- if (drv->devmodel) {
- /* using device model */
- int ret;
-
- /* initialize common driver fields */
- drv->driver.name = drv->name;
- drv->driver.bus = &parport_bus_type;
- drv->driver.owner = owner;
- drv->driver.mod_name = mod_name;
- ret = driver_register(&drv->driver);
- if (ret)
- return ret;
+ /* using device model */
+ int ret;
- /*
- * check if bus has any parallel port registered, if
- * none is found then load the lowlevel driver.
- */
- ret = bus_for_each_dev(&parport_bus_type, NULL, NULL,
- port_detect);
- if (!ret)
- get_lowlevel_driver();
-
- mutex_lock(&registration_lock);
- if (drv->match_port)
- bus_for_each_dev(&parport_bus_type, NULL, drv,
- port_check);
- mutex_unlock(&registration_lock);
- } else {
- struct parport *port;
-
- drv->devmodel = false;
-
- if (list_empty(&portlist))
- get_lowlevel_driver();
- mutex_lock(&registration_lock);
- list_for_each_entry(port, &portlist, list)
- drv->attach(port);
- list_add(&drv->list, &drivers);
- mutex_unlock(&registration_lock);
- }
+ /* initialize common driver fields */
+ drv->driver.name = drv->name;
+ drv->driver.bus = &parport_bus_type;
+ drv->driver.owner = owner;
+ drv->driver.mod_name = mod_name;
+ ret = driver_register(&drv->driver);
+ if (ret)
+ return ret;
+
+ /*
+ * check if bus has any parallel port registered, if
+ * none is found then load the lowlevel driver.
+ */
+ ret = bus_for_each_dev(&parport_bus_type, NULL, NULL,
+ port_detect);
+ if (!ret)
+ get_lowlevel_driver();
+
+ mutex_lock(&registration_lock);
+ if (drv->match_port)
+ bus_for_each_dev(&parport_bus_type, NULL, drv,
+ port_check);
+ mutex_unlock(&registration_lock);
return 0;
}
@@ -352,17 +338,9 @@ static int port_detach(struct device *dev, void *_drv)
void parport_unregister_driver(struct parport_driver *drv)
{
- struct parport *port;
-
mutex_lock(&registration_lock);
- if (drv->devmodel) {
- bus_for_each_dev(&parport_bus_type, NULL, drv, port_detach);
- driver_unregister(&drv->driver);
- } else {
- list_del_init(&drv->list);
- list_for_each_entry(port, &portlist, list)
- drv->detach(port);
- }
+ bus_for_each_dev(&parport_bus_type, NULL, drv, port_detach);
+ driver_unregister(&drv->driver);
mutex_unlock(&registration_lock);
}
EXPORT_SYMBOL(parport_unregister_driver);
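With the non-devmodel branch removed, every client takes the driver-model path: fill in a struct parport_driver, let the parport bus call match_port() for each port, and tear down through parport_unregister_driver(). A minimal sketch of such a client (hypothetical module, assuming the post-series parport API):

#include <linux/module.h>
#include <linux/parport.h>

/* Called once per parallel port found on the parport bus. */
static void example_match_port(struct parport *port)
{
	pr_info("example_pardev: found %s\n", port->name);
	/* a real driver would register a pardevice here */
}

static void example_detach(struct parport *port)
{
	/* undo whatever example_match_port() set up for this port */
}

static struct parport_driver example_driver = {
	.name		= "example_pardev",
	.match_port	= example_match_port,
	.detach		= example_detach,
};

static int __init example_init(void)
{
	/* expands to __parport_register_driver(&example_driver, THIS_MODULE, ...) */
	return parport_register_driver(&example_driver);
}

static void __exit example_exit(void)
{
	parport_unregister_driver(&example_driver);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");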
@@ -554,8 +532,8 @@ void parport_announce_port(struct parport *port)
#endif
if (!port->dev)
- printk(KERN_WARNING "%s: fix this legacy no-device port driver!\n",
- port->name);
+ pr_warn("%s: fix this legacy no-device port driver!\n",
+ port->name);
parport_proc_register(port);
mutex_lock(&registration_lock);
@@ -641,47 +619,48 @@ void parport_remove_port(struct parport *port)
}
EXPORT_SYMBOL(parport_remove_port);
+static void free_pardevice(struct device *dev)
+{
+ struct pardevice *par_dev = to_pardevice(dev);
+
+ kfree(par_dev->name);
+ kfree(par_dev);
+}
+
/**
- * parport_register_device - register a device on a parallel port
+ * parport_register_dev_model - register a device on a parallel port
* @port: port to which the device is attached
* @name: a name to refer to the device
- * @pf: preemption callback
- * @kf: kick callback (wake-up)
- * @irq_func: interrupt handler
- * @flags: registration flags
- * @handle: data for callback functions
+ * @par_dev_cb: struct containing callbacks
+ * @id: device number to be given to the device
*
* This function, called by parallel port device drivers,
* declares that a device is connected to a port, and tells the
* system all it needs to know.
*
- * The @name is allocated by the caller and must not be
- * deallocated until the caller calls @parport_unregister_device
- * for that device.
- *
- * The preemption callback function, @pf, is called when this
- * device driver has claimed access to the port but another
- * device driver wants to use it. It is given @handle as its
- * parameter, and should return zero if it is willing for the
- * system to release the port to another driver on its behalf.
- * If it wants to keep control of the port it should return
- * non-zero, and no action will be taken. It is good manners for
- * the driver to try to release the port at the earliest
- * opportunity after its preemption callback rejects a preemption
- * attempt. Note that if a preemption callback is happy for
- * preemption to go ahead, there is no need to release the port;
- * it is done automatically. This function may not block, as it
- * may be called from interrupt context. If the device driver
- * does not support preemption, @pf can be %NULL.
+ * The struct pardev_cb contains pointers to callbacks. The preemption
+ * callback function, @preempt, is called when this device driver
+ * has claimed access to the port but another device driver wants
+ * to use it. It is given @private as its parameter, and should
+ * return zero if it is willing for the system to release the port
+ * to another driver on its behalf. If it wants to keep control of
+ * the port it should return non-zero, and no action will be taken.
+ * It is good manners for the driver to try to release the port at
+ * the earliest opportunity after its preemption callback rejects a
+ * preemption attempt. Note that if a preemption callback is happy
+ * for preemption to go ahead, there is no need to release the
+ * port; it is done automatically. This function may not block, as
+ * it may be called from interrupt context. If the device driver
+ * does not support preemption, @preempt can be %NULL.
*
- * The wake-up ("kick") callback function, @kf, is called when
+ * The wake-up ("kick") callback function, @wakeup, is called when
* the port is available to be claimed for exclusive access; that
* is, parport_claim() is guaranteed to succeed when called from
* inside the wake-up callback function. If the driver wants to
* claim the port it should do so; otherwise, it need not take
* any action. This function may not block, as it may be called
* from interrupt context. If the device driver does not want to
- * be explicitly invited to claim the port in this way, @kf can
+ * be explicitly invited to claim the port in this way, @wakeup can
* be %NULL.
*
* The interrupt handler, @irq_func, is called when an interrupt
@@ -711,138 +690,6 @@ EXPORT_SYMBOL(parport_remove_port);
**/
struct pardevice *
-parport_register_device(struct parport *port, const char *name,
- int (*pf)(void *), void (*kf)(void *),
- void (*irq_func)(void *),
- int flags, void *handle)
-{
- struct pardevice *tmp;
-
- if (port->physport->flags & PARPORT_FLAG_EXCL) {
- /* An exclusive device is registered. */
- printk(KERN_DEBUG "%s: no more devices allowed\n",
- port->name);
- return NULL;
- }
-
- if (flags & PARPORT_DEV_LURK) {
- if (!pf || !kf) {
- printk(KERN_INFO "%s: refused to register lurking device (%s) without callbacks\n", port->name, name);
- return NULL;
- }
- }
-
- if (flags & PARPORT_DEV_EXCL) {
- if (port->physport->devices) {
- /*
- * If a device is already registered and this new
- * device wants exclusive access, then no need to
- * continue as we can not grant exclusive access to
- * this device.
- */
- pr_err("%s: cannot grant exclusive access for device %s\n",
- port->name, name);
- return NULL;
- }
- }
-
- /*
- * We up our own module reference count, and that of the port
- * on which a device is to be registered, to ensure that
- * neither of us gets unloaded while we sleep in (e.g.)
- * kmalloc.
- */
- if (!try_module_get(port->ops->owner))
- return NULL;
-
- parport_get_port(port);
-
- tmp = kmalloc(sizeof(struct pardevice), GFP_KERNEL);
- if (!tmp)
- goto out;
-
- tmp->state = kmalloc(sizeof(struct parport_state), GFP_KERNEL);
- if (!tmp->state)
- goto out_free_pardevice;
-
- tmp->name = name;
- tmp->port = port;
- tmp->daisy = -1;
- tmp->preempt = pf;
- tmp->wakeup = kf;
- tmp->private = handle;
- tmp->flags = flags;
- tmp->irq_func = irq_func;
- tmp->waiting = 0;
- tmp->timeout = 5 * HZ;
- tmp->devmodel = false;
-
- /* Chain this onto the list */
- tmp->prev = NULL;
- /*
- * This function must not run from an irq handler so we don' t need
- * to clear irq on the local CPU. -arca
- */
- spin_lock(&port->physport->pardevice_lock);
-
- if (flags & PARPORT_DEV_EXCL) {
- if (port->physport->devices) {
- spin_unlock(&port->physport->pardevice_lock);
- printk(KERN_DEBUG
- "%s: cannot grant exclusive access for device %s\n",
- port->name, name);
- goto out_free_all;
- }
- port->flags |= PARPORT_FLAG_EXCL;
- }
-
- tmp->next = port->physport->devices;
- wmb(); /*
- * Make sure that tmp->next is written before it's
- * added to the list; see comments marked 'no locking
- * required'
- */
- if (port->physport->devices)
- port->physport->devices->prev = tmp;
- port->physport->devices = tmp;
- spin_unlock(&port->physport->pardevice_lock);
-
- init_waitqueue_head(&tmp->wait_q);
- tmp->timeslice = parport_default_timeslice;
- tmp->waitnext = tmp->waitprev = NULL;
-
- /*
- * This has to be run as last thing since init_state may need other
- * pardevice fields. -arca
- */
- port->ops->init_state(tmp, tmp->state);
- if (!test_and_set_bit(PARPORT_DEVPROC_REGISTERED, &port->devflags)) {
- port->proc_device = tmp;
- parport_device_proc_register(tmp);
- }
- return tmp;
-
- out_free_all:
- kfree(tmp->state);
- out_free_pardevice:
- kfree(tmp);
- out:
- parport_put_port(port);
- module_put(port->ops->owner);
-
- return NULL;
-}
-EXPORT_SYMBOL(parport_register_device);
-
-static void free_pardevice(struct device *dev)
-{
- struct pardevice *par_dev = to_pardevice(dev);
-
- kfree(par_dev->name);
- kfree(par_dev);
-}
-
-struct pardevice *
parport_register_dev_model(struct parport *port, const char *name,
const struct pardev_cb *par_dev_cb, int id)
{
@@ -996,7 +843,7 @@ void parport_unregister_device(struct pardevice *dev)
#ifdef PARPORT_PARANOID
if (!dev) {
- printk(KERN_ERR "parport_unregister_device: passed NULL\n");
+ pr_err("%s: passed NULL\n", __func__);
return;
}
#endif
@@ -1046,10 +893,7 @@ void parport_unregister_device(struct pardevice *dev)
spin_unlock_irq(&port->waitlist_lock);
kfree(dev->state);
- if (dev->devmodel)
- device_unregister(&dev->dev);
- else
- kfree(dev);
+ device_unregister(&dev->dev);
module_put(port->ops->owner);
parport_put_port(port);
@@ -1137,8 +981,7 @@ int parport_claim(struct pardevice *dev)
unsigned long flags;
if (port->cad == dev) {
- printk(KERN_INFO "%s: %s already owner\n",
- dev->port->name,dev->name);
+ pr_info("%s: %s already owner\n", dev->port->name, dev->name);
return 0;
}
@@ -1158,9 +1001,8 @@ int parport_claim(struct pardevice *dev)
* I think we'll actually deadlock rather than
* get here, but just in case..
*/
- printk(KERN_WARNING
- "%s: %s released port when preempted!\n",
- port->name, oldcad->name);
+ pr_warn("%s: %s released port when preempted!\n",
+ port->name, oldcad->name);
if (port->cad)
goto blocked;
}
@@ -1260,7 +1102,8 @@ int parport_claim_or_block(struct pardevice *dev)
r = parport_claim(dev);
if (r == -EAGAIN) {
#ifdef PARPORT_DEBUG_SHARING
- printk(KERN_DEBUG "%s: parport_claim() returned -EAGAIN\n", dev->name);
+ printk(KERN_DEBUG "%s: parport_claim() returned -EAGAIN\n",
+ dev->name);
#endif
/*
* FIXME!!! Use the proper locking for dev->waiting,
@@ -1293,7 +1136,7 @@ int parport_claim_or_block(struct pardevice *dev)
if (dev->port->physport->cad != dev)
printk(KERN_DEBUG "%s: exiting parport_claim_or_block but %s owns port!\n",
dev->name, dev->port->physport->cad ?
- dev->port->physport->cad->name:"nobody");
+ dev->port->physport->cad->name : "nobody");
#endif
}
dev->waiting = 0;
@@ -1320,8 +1163,8 @@ void parport_release(struct pardevice *dev)
write_lock_irqsave(&port->cad_lock, flags);
if (port->cad != dev) {
write_unlock_irqrestore(&port->cad_lock, flags);
- printk(KERN_WARNING "%s: %s tried to release parport when not owner\n",
- port->name, dev->name);
+ pr_warn("%s: %s tried to release parport when not owner\n",
+ port->name, dev->name);
return;
}
@@ -1361,7 +1204,8 @@ void parport_release(struct pardevice *dev)
if (dev->port->cad) /* racy but no matter */
return;
} else {
- printk(KERN_ERR "%s: don't know how to wake %s\n", port->name, pd->name);
+ pr_err("%s: don't know how to wake %s\n",
+ port->name, pd->name);
}
}
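parport_register_dev_model() is now the only way to attach a device to a port; the callbacks described in the kernel-doc above travel in struct pardev_cb, and teardown goes through parport_unregister_device(), which drops the struct device and its free_pardevice() release hook. A hedged sketch of a match_port() body using it (hypothetical names, error handling trimmed):

#include <linux/parport.h>

struct example_dev {
	struct pardevice *pardev;
};

/* Preemption callback: return 0 to let another driver take the port. */
static int example_preempt(void *private)
{
	return 0;
}

/* Wake-up callback: the port is free again, claim it if we still want it. */
static void example_wakeup(void *private)
{
	struct example_dev *edev = private;

	parport_claim(edev->pardev);	/* guaranteed to succeed here */
}

static void example_attach(struct parport *port, struct example_dev *edev)
{
	struct pardev_cb cb = {
		.preempt = example_preempt,
		.wakeup	 = example_wakeup,
		.private = edev,
		/* .irq_func and .flags left at their defaults */
	};

	edev->pardev = parport_register_dev_model(port, "example", &cb, 0);
	if (!edev->pardev)
		return;

	/* claim the port (sleeping until it is free), talk to the device,
	 * then release it so other pardevices can use the port */
	if (parport_claim_or_block(edev->pardev) < 0)
		return;
	parport_write_data(port, 0x55);
	parport_release(edev->pardev);
}

static void example_detach_dev(struct example_dev *edev)
{
	parport_unregister_device(edev->pardev);
}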
diff --git a/drivers/reset/reset-zynqmp.c b/drivers/reset/reset-zynqmp.c
index 0144075b11a6..373ea8d4f7a1 100644
--- a/drivers/reset/reset-zynqmp.c
+++ b/drivers/reset/reset-zynqmp.c
@@ -15,7 +15,6 @@
struct zynqmp_reset_data {
struct reset_controller_dev rcdev;
- const struct zynqmp_eemi_ops *eemi_ops;
};
static inline struct zynqmp_reset_data *
@@ -27,28 +26,23 @@ to_zynqmp_reset_data(struct reset_controller_dev *rcdev)
static int zynqmp_reset_assert(struct reset_controller_dev *rcdev,
unsigned long id)
{
- struct zynqmp_reset_data *priv = to_zynqmp_reset_data(rcdev);
-
- return priv->eemi_ops->reset_assert(ZYNQMP_RESET_ID + id,
- PM_RESET_ACTION_ASSERT);
+ return zynqmp_pm_reset_assert(ZYNQMP_RESET_ID + id,
+ PM_RESET_ACTION_ASSERT);
}
static int zynqmp_reset_deassert(struct reset_controller_dev *rcdev,
unsigned long id)
{
- struct zynqmp_reset_data *priv = to_zynqmp_reset_data(rcdev);
-
- return priv->eemi_ops->reset_assert(ZYNQMP_RESET_ID + id,
- PM_RESET_ACTION_RELEASE);
+ return zynqmp_pm_reset_assert(ZYNQMP_RESET_ID + id,
+ PM_RESET_ACTION_RELEASE);
}
static int zynqmp_reset_status(struct reset_controller_dev *rcdev,
unsigned long id)
{
- struct zynqmp_reset_data *priv = to_zynqmp_reset_data(rcdev);
int val, err;
- err = priv->eemi_ops->reset_get_status(ZYNQMP_RESET_ID + id, &val);
+ err = zynqmp_pm_reset_get_status(ZYNQMP_RESET_ID + id, &val);
if (err)
return err;
@@ -58,10 +52,8 @@ static int zynqmp_reset_status(struct reset_controller_dev *rcdev,
static int zynqmp_reset_reset(struct reset_controller_dev *rcdev,
unsigned long id)
{
- struct zynqmp_reset_data *priv = to_zynqmp_reset_data(rcdev);
-
- return priv->eemi_ops->reset_assert(ZYNQMP_RESET_ID + id,
- PM_RESET_ACTION_PULSE);
+ return zynqmp_pm_reset_assert(ZYNQMP_RESET_ID + id,
+ PM_RESET_ACTION_PULSE);
}
static const struct reset_control_ops zynqmp_reset_ops = {
@@ -79,10 +71,6 @@ static int zynqmp_reset_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- priv->eemi_ops = zynqmp_pm_get_eemi_ops();
- if (IS_ERR(priv->eemi_ops))
- return PTR_ERR(priv->eemi_ops);
-
platform_set_drvdata(pdev, priv);
priv->rcdev.ops = &zynqmp_reset_ops;
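This reset driver, like the other ZynqMP consumers below, no longer fetches an eemi_ops table at probe time; it calls the helpers exported by the firmware driver directly. A minimal sketch of the new calling convention (hypothetical helper, assuming the zynqmp_pm_* exports introduced by this series):

#include <linux/firmware/xlnx-zynqmp.h>

/* Hypothetical helper: pulse one ZynqMP reset line and report its state. */
static int example_pulse_reset(enum zynqmp_pm_reset reset)
{
	u32 status;
	int ret;

	ret = zynqmp_pm_reset_assert(reset, PM_RESET_ACTION_PULSE);
	if (ret)
		return ret;

	ret = zynqmp_pm_reset_get_status(reset, &status);
	if (ret)
		return ret;

	return status ? 1 : 0;	/* 1 = asserted, 0 = released */
}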
diff --git a/drivers/slimbus/qcom-ngd-ctrl.c b/drivers/slimbus/qcom-ngd-ctrl.c
index fc2575fef51b..743ee7b4e63f 100644
--- a/drivers/slimbus/qcom-ngd-ctrl.c
+++ b/drivers/slimbus/qcom-ngd-ctrl.c
@@ -1361,12 +1361,10 @@ static int of_qcom_slim_ngd_register(struct device *parent,
ngd->pdev->driver_override = QCOM_SLIM_NGD_DRV_NAME;
ngd->pdev->dev.of_node = node;
ctrl->ngd = ngd;
- platform_set_drvdata(ngd->pdev, ctrl);
platform_device_add(ngd->pdev);
ngd->base = ctrl->base + ngd->id * data->offset +
(ngd->id - 1) * data->size;
- ctrl->ngd = ngd;
return 0;
}
@@ -1376,12 +1374,13 @@ static int of_qcom_slim_ngd_register(struct device *parent,
static int qcom_slim_ngd_probe(struct platform_device *pdev)
{
- struct qcom_slim_ngd_ctrl *ctrl = platform_get_drvdata(pdev);
struct device *dev = &pdev->dev;
+ struct qcom_slim_ngd_ctrl *ctrl = dev_get_drvdata(dev->parent);
int ret;
ctrl->ctrl.dev = dev;
+ platform_set_drvdata(pdev, ctrl);
pm_runtime_use_autosuspend(dev);
pm_runtime_set_autosuspend_delay(dev, QCOM_SLIM_NGD_AUTOSUSPEND);
pm_runtime_set_suspended(dev);
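The slimbus fix changes who owns the drvdata: the parent controller keeps the qcom_slim_ngd_ctrl pointer in its own drvdata, and the child NGD platform device looks it up through dev->parent in probe before setting its own. A small sketch of that handoff (hypothetical names):

#include <linux/platform_device.h>
#include <linux/slab.h>

struct example_ctrl {
	void __iomem *base;
};

/* Parent probe: stash the controller state in the parent's drvdata. */
static int parent_probe(struct platform_device *pdev)
{
	struct example_ctrl *ctrl;

	ctrl = devm_kzalloc(&pdev->dev, sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		return -ENOMEM;

	platform_set_drvdata(pdev, ctrl);
	/* ... create and add the child platform devices here ... */
	return 0;
}

/* Child probe: the child was created by the parent, so its controller
 * state lives in the parent's drvdata, not its own. */
static int child_probe(struct platform_device *pdev)
{
	struct example_ctrl *ctrl = dev_get_drvdata(pdev->dev.parent);

	platform_set_drvdata(pdev, ctrl);
	return 0;
}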
diff --git a/drivers/soc/xilinx/zynqmp_pm_domains.c b/drivers/soc/xilinx/zynqmp_pm_domains.c
index 23d90cb12ba9..226d343f0a6a 100644
--- a/drivers/soc/xilinx/zynqmp_pm_domains.c
+++ b/drivers/soc/xilinx/zynqmp_pm_domains.c
@@ -23,8 +23,6 @@
/* Flag stating if PM nodes mapped to the PM domain has been requested */
#define ZYNQMP_PM_DOMAIN_REQUESTED BIT(0)
-static const struct zynqmp_eemi_ops *eemi_ops;
-
static int min_capability;
/**
@@ -76,11 +74,8 @@ static int zynqmp_gpd_power_on(struct generic_pm_domain *domain)
int ret;
struct zynqmp_pm_domain *pd;
- if (!eemi_ops->set_requirement)
- return -ENXIO;
-
pd = container_of(domain, struct zynqmp_pm_domain, gpd);
- ret = eemi_ops->set_requirement(pd->node_id,
+ ret = zynqmp_pm_set_requirement(pd->node_id,
ZYNQMP_PM_CAPABILITY_ACCESS,
ZYNQMP_PM_MAX_QOS,
ZYNQMP_PM_REQUEST_ACK_BLOCKING);
@@ -111,9 +106,6 @@ static int zynqmp_gpd_power_off(struct generic_pm_domain *domain)
u32 capabilities = min_capability;
bool may_wakeup;
- if (!eemi_ops->set_requirement)
- return -ENXIO;
-
pd = container_of(domain, struct zynqmp_pm_domain, gpd);
/* If domain is already released there is nothing to be done */
@@ -134,7 +126,7 @@ static int zynqmp_gpd_power_off(struct generic_pm_domain *domain)
}
}
- ret = eemi_ops->set_requirement(pd->node_id, capabilities, 0,
+ ret = zynqmp_pm_set_requirement(pd->node_id, capabilities, 0,
ZYNQMP_PM_REQUEST_ACK_NO);
/**
* If powering down of any node inside this domain fails,
@@ -163,16 +155,13 @@ static int zynqmp_gpd_attach_dev(struct generic_pm_domain *domain,
int ret;
struct zynqmp_pm_domain *pd;
- if (!eemi_ops->request_node)
- return -ENXIO;
-
pd = container_of(domain, struct zynqmp_pm_domain, gpd);
/* If this is not the first device to attach there is nothing to do */
if (domain->device_count)
return 0;
- ret = eemi_ops->request_node(pd->node_id, 0, 0,
+ ret = zynqmp_pm_request_node(pd->node_id, 0, 0,
ZYNQMP_PM_REQUEST_ACK_BLOCKING);
/* If requesting a node fails print and return the error */
if (ret) {
@@ -199,16 +188,13 @@ static void zynqmp_gpd_detach_dev(struct generic_pm_domain *domain,
int ret;
struct zynqmp_pm_domain *pd;
- if (!eemi_ops->release_node)
- return;
-
pd = container_of(domain, struct zynqmp_pm_domain, gpd);
/* If this is not the last device to detach there is nothing to do */
if (domain->device_count)
return;
- ret = eemi_ops->release_node(pd->node_id);
+ ret = zynqmp_pm_release_node(pd->node_id);
/* If releasing a node fails print the error and return */
if (ret) {
pr_err("%s() %s release failed for node %d: %d\n",
@@ -266,10 +252,6 @@ static int zynqmp_gpd_probe(struct platform_device *pdev)
struct zynqmp_pm_domain *pd;
struct device *dev = &pdev->dev;
- eemi_ops = zynqmp_pm_get_eemi_ops();
- if (IS_ERR(eemi_ops))
- return PTR_ERR(eemi_ops);
-
pd = devm_kcalloc(dev, ZYNQMP_NUM_DOMAINS, sizeof(*pd), GFP_KERNEL);
if (!pd)
return -ENOMEM;
diff --git a/drivers/soc/xilinx/zynqmp_power.c b/drivers/soc/xilinx/zynqmp_power.c
index 09227895d216..31ff49fcd078 100644
--- a/drivers/soc/xilinx/zynqmp_power.c
+++ b/drivers/soc/xilinx/zynqmp_power.c
@@ -30,7 +30,6 @@ struct zynqmp_pm_work_struct {
static struct zynqmp_pm_work_struct *zynqmp_pm_init_suspend_work;
static struct mbox_chan *rx_chan;
-static const struct zynqmp_eemi_ops *eemi_ops;
enum pm_suspend_mode {
PM_SUSPEND_MODE_FIRST = 0,
@@ -155,9 +154,6 @@ static ssize_t suspend_mode_store(struct device *dev,
{
int md, ret = -EINVAL;
- if (!eemi_ops->set_suspend_mode)
- return ret;
-
for (md = PM_SUSPEND_MODE_FIRST; md < ARRAY_SIZE(suspend_modes); md++)
if (suspend_modes[md] &&
sysfs_streq(suspend_modes[md], buf)) {
@@ -166,7 +162,7 @@ static ssize_t suspend_mode_store(struct device *dev,
}
if (!ret && md != suspend_mode) {
- ret = eemi_ops->set_suspend_mode(md);
+ ret = zynqmp_pm_set_suspend_mode(md);
if (likely(!ret))
suspend_mode = md;
}
@@ -182,15 +178,8 @@ static int zynqmp_pm_probe(struct platform_device *pdev)
u32 pm_api_version;
struct mbox_client *client;
- eemi_ops = zynqmp_pm_get_eemi_ops();
- if (IS_ERR(eemi_ops))
- return PTR_ERR(eemi_ops);
-
- if (!eemi_ops->get_api_version || !eemi_ops->init_finalize)
- return -ENXIO;
-
- eemi_ops->init_finalize();
- eemi_ops->get_api_version(&pm_api_version);
+ zynqmp_pm_init_finalize();
+ zynqmp_pm_get_api_version(&pm_api_version);
/* Check PM API version number */
if (pm_api_version < ZYNQMP_PM_VERSION)
diff --git a/drivers/spi/spi-zynqmp-gqspi.c b/drivers/spi/spi-zynqmp-gqspi.c
index 7412a3042a8d..811c97a7c858 100644
--- a/drivers/spi/spi-zynqmp-gqspi.c
+++ b/drivers/spi/spi-zynqmp-gqspi.c
@@ -135,7 +135,6 @@
#define SPI_AUTOSUSPEND_TIMEOUT 3000
enum mode_type {GQSPI_MODE_IO, GQSPI_MODE_DMA};
-static const struct zynqmp_eemi_ops *eemi_ops;
/**
* struct zynqmp_qspi - Defines qspi driver instance
@@ -1015,10 +1014,6 @@ static int zynqmp_qspi_probe(struct platform_device *pdev)
struct zynqmp_qspi *xqspi;
struct device *dev = &pdev->dev;
- eemi_ops = zynqmp_pm_get_eemi_ops();
- if (IS_ERR(eemi_ops))
- return PTR_ERR(eemi_ops);
-
master = spi_alloc_master(&pdev->dev, sizeof(*xqspi));
if (!master)
return -ENOMEM;
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
index 6e725c6c6256..73efb80815db 100644
--- a/drivers/uio/uio.c
+++ b/drivers/uio/uio.c
@@ -398,7 +398,7 @@ static void uio_dev_del_attributes(struct uio_device *idev)
static int uio_get_minor(struct uio_device *idev)
{
- int retval = -ENOMEM;
+ int retval;
mutex_lock(&minor_lock);
retval = idr_alloc(&uio_idr, idev, 0, UIO_MAX_DEVICES, GFP_KERNEL);
diff --git a/drivers/uio/uio_dmem_genirq.c b/drivers/uio/uio_dmem_genirq.c
index f6ab3f28c838..6e27fe4fcca3 100644
--- a/drivers/uio/uio_dmem_genirq.c
+++ b/drivers/uio/uio_dmem_genirq.c
@@ -44,7 +44,6 @@ static int uio_dmem_genirq_open(struct uio_info *info, struct inode *inode)
{
struct uio_dmem_genirq_platdata *priv = info->priv;
struct uio_mem *uiomem;
- int ret = 0;
int dmem_region = priv->dmem_region_start;
uiomem = &priv->uioinfo->mem[priv->dmem_region_start];
@@ -68,7 +67,7 @@ static int uio_dmem_genirq_open(struct uio_info *info, struct inode *inode)
mutex_unlock(&priv->alloc_lock);
/* Wait until the Runtime PM code has woken up the device */
pm_runtime_get_sync(&priv->pdev->dev);
- return ret;
+ return 0;
}
static int uio_dmem_genirq_release(struct uio_info *info, struct inode *inode)
diff --git a/drivers/visorbus/controlvmchannel.h b/drivers/visorbus/controlvmchannel.h
index 8c57562a070a..c87213554427 100644
--- a/drivers/visorbus/controlvmchannel.h
+++ b/drivers/visorbus/controlvmchannel.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2010 - 2015 UNISYS CORPORATION
* All rights reserved.
diff --git a/drivers/visorbus/vbuschannel.h b/drivers/visorbus/vbuschannel.h
index b1dce26166bf..4aaf6564eb9f 100644
--- a/drivers/visorbus/vbuschannel.h
+++ b/drivers/visorbus/vbuschannel.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2010 - 2015 UNISYS CORPORATION
* All rights reserved.
diff --git a/drivers/visorbus/visorbus_private.h b/drivers/visorbus/visorbus_private.h
index 366380b7f8d9..6956de605827 100644
--- a/drivers/visorbus/visorbus_private.h
+++ b/drivers/visorbus/visorbus_private.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2010 - 2015 UNISYS CORPORATION
* All rights reserved.