Diffstat (limited to 'arch/arm/common')
-rw-r--r--   arch/arm/common/edma.c           | 197
-rw-r--r--   arch/arm/common/mcpm_entry.c     |   6
-rw-r--r--   arch/arm/common/mcpm_platsmp.c   |   2
3 files changed, 100 insertions, 105 deletions
diff --git a/arch/arm/common/edma.c b/arch/arm/common/edma.c
index 5339009b3c0c..485be42519b9 100644
--- a/arch/arm/common/edma.c
+++ b/arch/arm/common/edma.c
@@ -102,7 +102,13 @@
 #define PARM_OFFSET(param_no)   (EDMA_PARM + ((param_no) << 5))
 
 #define EDMA_DCHMAP     0x0100  /* 64 registers */
-#define CHMAP_EXIST     BIT(24)
+
+/* CCCFG register */
+#define GET_NUM_DMACH(x)        (x & 0x7) /* bits 0-2 */
+#define GET_NUM_PAENTRY(x)      ((x & 0x7000) >> 12) /* bits 12-14 */
+#define GET_NUM_EVQUE(x)        ((x & 0x70000) >> 16) /* bits 16-18 */
+#define GET_NUM_REGN(x)         ((x & 0x300000) >> 20) /* bits 20-21 */
+#define CHMAP_EXIST             BIT(24)
 
 #define EDMA_MAX_DMACH           64
 #define EDMA_MAX_PARAMENTRY     512
@@ -233,7 +239,6 @@ struct edma {
         unsigned        num_region;
         unsigned        num_slots;
         unsigned        num_tc;
-        unsigned        num_cc;
         enum dma_event_q        default_queue;
 
         /* list of channels with no even trigger; terminated by "-1" */
@@ -290,12 +295,6 @@ static void map_dmach_queue(unsigned ctlr, unsigned ch_no,
                         ~(0x7 << bit), queue_no << bit);
 }
 
-static void __init map_queue_tc(unsigned ctlr, int queue_no, int tc_no)
-{
-        int bit = queue_no * 4;
-        edma_modify(ctlr, EDMA_QUETCMAP, ~(0x7 << bit), ((tc_no & 0x7) << bit));
-}
-
 static void __init assign_priority_to_queue(unsigned ctlr, int queue_no,
                 int priority)
 {
@@ -994,29 +993,23 @@ void edma_set_dest(unsigned slot, dma_addr_t dest_port,
 EXPORT_SYMBOL(edma_set_dest);
 
 /**
- * edma_get_position - returns the current transfer points
+ * edma_get_position - returns the current transfer point
  * @slot: parameter RAM slot being examined
- * @src: pointer to source port position
- * @dst: pointer to destination port position
+ * @dst: true selects the dest position, false the source
  *
- * Returns current source and destination addresses for a particular
- * parameter RAM slot. Its channel should not be active when this is called.
+ * Returns the position of the current active slot
  */
-void edma_get_position(unsigned slot, dma_addr_t *src, dma_addr_t *dst)
+dma_addr_t edma_get_position(unsigned slot, bool dst)
 {
-        struct edmacc_param temp;
-        unsigned ctlr;
+        u32 offs, ctlr = EDMA_CTLR(slot);
 
-        ctlr = EDMA_CTLR(slot);
         slot = EDMA_CHAN_SLOT(slot);
 
-        edma_read_slot(EDMA_CTLR_CHAN(ctlr, slot), &temp);
-        if (src != NULL)
-                *src = temp.src;
-        if (dst != NULL)
-                *dst = temp.dst;
+        offs = PARM_OFFSET(slot);
+        offs += dst ? PARM_DST : PARM_SRC;
+
+        return edma_read(ctlr, offs);
 }
-EXPORT_SYMBOL(edma_get_position);
 
 /**
  * edma_set_src_index - configure DMA source address indexing
@@ -1421,6 +1414,67 @@ void edma_clear_event(unsigned channel)
 }
 EXPORT_SYMBOL(edma_clear_event);
 
+static int edma_setup_from_hw(struct device *dev, struct edma_soc_info *pdata,
+                              struct edma *edma_cc)
+{
+        int i;
+        u32 value, cccfg;
+        s8 (*queue_priority_map)[2];
+
+        /* Decode the eDMA3 configuration from CCCFG register */
+        cccfg = edma_read(0, EDMA_CCCFG);
+
+        value = GET_NUM_REGN(cccfg);
+        edma_cc->num_region = BIT(value);
+
+        value = GET_NUM_DMACH(cccfg);
+        edma_cc->num_channels = BIT(value + 1);
+
+        value = GET_NUM_PAENTRY(cccfg);
+        edma_cc->num_slots = BIT(value + 4);
+
+        value = GET_NUM_EVQUE(cccfg);
+        edma_cc->num_tc = value + 1;
+
+        dev_dbg(dev, "eDMA3 HW configuration (cccfg: 0x%08x):\n", cccfg);
+        dev_dbg(dev, "num_region: %u\n", edma_cc->num_region);
+        dev_dbg(dev, "num_channel: %u\n", edma_cc->num_channels);
+        dev_dbg(dev, "num_slot: %u\n", edma_cc->num_slots);
+        dev_dbg(dev, "num_tc: %u\n", edma_cc->num_tc);
+
+        /* Nothing need to be done if queue priority is provided */
+        if (pdata->queue_priority_mapping)
+                return 0;
+
+        /*
+         * Configure TC/queue priority as follows:
+         * Q0 - priority 0
+         * Q1 - priority 1
+         * Q2 - priority 2
+         * ...
+         * The meaning of priority numbers: 0 highest priority, 7 lowest
+         * priority. So Q0 is the highest priority queue and the last queue has
+         * the lowest priority.
+         */
+        queue_priority_map = devm_kzalloc(dev,
+                                          (edma_cc->num_tc + 1) * sizeof(s8),
+                                          GFP_KERNEL);
+        if (!queue_priority_map)
+                return -ENOMEM;
+
+        for (i = 0; i < edma_cc->num_tc; i++) {
+                queue_priority_map[i][0] = i;
+                queue_priority_map[i][1] = i;
+        }
+        queue_priority_map[i][0] = -1;
+        queue_priority_map[i][1] = -1;
+
+        pdata->queue_priority_mapping = queue_priority_map;
+        pdata->default_queue = 0;
+
+        return 0;
+}
+
 #if IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_DMADEVICES)
 static int edma_xbar_event_map(struct device *dev,
                                struct device_node *node,
@@ -1471,65 +1525,16 @@ static int edma_of_parse_dt(struct device *dev,
                             struct device_node *node,
                             struct edma_soc_info *pdata)
 {
-        int ret = 0, i;
-        u32 value;
+        int ret = 0;
         struct property *prop;
         size_t sz;
         struct edma_rsv_info *rsv_info;
-        s8 (*queue_tc_map)[2], (*queue_priority_map)[2];
-
-        memset(pdata, 0, sizeof(struct edma_soc_info));
-
-        ret = of_property_read_u32(node, "dma-channels", &value);
-        if (ret < 0)
-                return ret;
-        pdata->n_channel = value;
-
-        ret = of_property_read_u32(node, "ti,edma-regions", &value);
-        if (ret < 0)
-                return ret;
-        pdata->n_region = value;
-
-        ret = of_property_read_u32(node, "ti,edma-slots", &value);
-        if (ret < 0)
-                return ret;
-        pdata->n_slot = value;
-
-        pdata->n_cc = 1;
 
         rsv_info = devm_kzalloc(dev, sizeof(struct edma_rsv_info), GFP_KERNEL);
         if (!rsv_info)
                 return -ENOMEM;
         pdata->rsv = rsv_info;
 
-        queue_tc_map = devm_kzalloc(dev, 8*sizeof(s8), GFP_KERNEL);
-        if (!queue_tc_map)
-                return -ENOMEM;
-
-        for (i = 0; i < 3; i++) {
-                queue_tc_map[i][0] = i;
-                queue_tc_map[i][1] = i;
-        }
-        queue_tc_map[i][0] = -1;
-        queue_tc_map[i][1] = -1;
-
-        pdata->queue_tc_mapping = queue_tc_map;
-
-        queue_priority_map = devm_kzalloc(dev, 8*sizeof(s8), GFP_KERNEL);
-        if (!queue_priority_map)
-                return -ENOMEM;
-
-        for (i = 0; i < 3; i++) {
-                queue_priority_map[i][0] = i;
-                queue_priority_map[i][1] = i;
-        }
-        queue_priority_map[i][0] = -1;
-        queue_priority_map[i][1] = -1;
-
-        pdata->queue_priority_mapping = queue_priority_map;
-
-        pdata->default_queue = 0;
-
         prop = of_find_property(node, "ti,edma-xbar-event-map", &sz);
         if (prop)
                 ret = edma_xbar_event_map(dev, node, pdata, sz);
@@ -1556,6 +1561,7 @@ static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev,
                 return ERR_PTR(ret);
 
         dma_cap_set(DMA_SLAVE, edma_filter_info.dma_cap);
+        dma_cap_set(DMA_CYCLIC, edma_filter_info.dma_cap);
         of_dma_controller_register(dev->of_node, of_dma_simple_xlate,
                                    &edma_filter_info);
 
@@ -1574,7 +1580,6 @@ static int edma_probe(struct platform_device *pdev)
         struct edma_soc_info    **info = pdev->dev.platform_data;
         struct edma_soc_info    *ninfo[EDMA_MAX_CC] = {NULL};
         s8              (*queue_priority_mapping)[2];
-        s8              (*queue_tc_mapping)[2];
         int             i, j, off, ln, found = 0;
         int             status = -1;
         const s16       (*rsv_chans)[2];
@@ -1585,7 +1590,6 @@ static int edma_probe(struct platform_device *pdev)
         struct resource         *r[EDMA_MAX_CC] = {NULL};
         struct resource         res[EDMA_MAX_CC];
         char                    res_name[10];
-        char                    irq_name[10];
         struct device_node      *node = pdev->dev.of_node;
         struct device           *dev = &pdev->dev;
         int                     ret;
@@ -1650,12 +1654,10 @@ static int edma_probe(struct platform_device *pdev)
                 if (!edma_cc[j])
                         return -ENOMEM;
 
-                edma_cc[j]->num_channels = min_t(unsigned, info[j]->n_channel,
-                                                        EDMA_MAX_DMACH);
-                edma_cc[j]->num_slots = min_t(unsigned, info[j]->n_slot,
-                                                        EDMA_MAX_PARAMENTRY);
-                edma_cc[j]->num_cc = min_t(unsigned, info[j]->n_cc,
-                                                EDMA_MAX_CC);
+                /* Get eDMA3 configuration from IP */
+                ret = edma_setup_from_hw(dev, info[j], edma_cc[j]);
+                if (ret)
+                        return ret;
 
                 edma_cc[j]->default_queue = info[j]->default_queue;
 
@@ -1707,14 +1709,21 @@ static int edma_probe(struct platform_device *pdev)
 
                 if (node) {
                         irq[j] = irq_of_parse_and_map(node, 0);
+                        err_irq[j] = irq_of_parse_and_map(node, 2);
                 } else {
+                        char irq_name[10];
+
                         sprintf(irq_name, "edma%d", j);
                         irq[j] = platform_get_irq_byname(pdev, irq_name);
+
+                        sprintf(irq_name, "edma%d_err", j);
+                        err_irq[j] = platform_get_irq_byname(pdev, irq_name);
                 }
                 edma_cc[j]->irq_res_start = irq[j];
-                status = devm_request_irq(&pdev->dev, irq[j],
-                                        dma_irq_handler, 0, "edma",
-                                        &pdev->dev);
+                edma_cc[j]->irq_res_end = err_irq[j];
+
+                status = devm_request_irq(dev, irq[j], dma_irq_handler, 0,
+                                          "edma", dev);
                 if (status < 0) {
                         dev_dbg(&pdev->dev,
                                 "devm_request_irq %d failed --> %d\n",
@@ -1722,16 +1731,8 @@ static int edma_probe(struct platform_device *pdev)
                         return status;
                 }
 
-                if (node) {
-                        err_irq[j] = irq_of_parse_and_map(node, 2);
-                } else {
-                        sprintf(irq_name, "edma%d_err", j);
-                        err_irq[j] = platform_get_irq_byname(pdev, irq_name);
-                }
-                edma_cc[j]->irq_res_end = err_irq[j];
-                status = devm_request_irq(&pdev->dev, err_irq[j],
-                                        dma_ccerr_handler, 0,
-                                        "edma_error", &pdev->dev);
+                status = devm_request_irq(dev, err_irq[j], dma_ccerr_handler, 0,
+                                          "edma_error", dev);
                 if (status < 0) {
                         dev_dbg(&pdev->dev,
                                 "devm_request_irq %d failed --> %d\n",
@@ -1742,14 +1743,8 @@ static int edma_probe(struct platform_device *pdev)
                 for (i = 0; i < edma_cc[j]->num_channels; i++)
                         map_dmach_queue(j, i, info[j]->default_queue);
 
-                queue_tc_mapping = info[j]->queue_tc_mapping;
                 queue_priority_mapping = info[j]->queue_priority_mapping;
 
-                /* Event queue to TC mapping */
-                for (i = 0; queue_tc_mapping[i][0] != -1; i++)
-                        map_queue_tc(j, queue_tc_mapping[i][0],
-                                        queue_tc_mapping[i][1]);
-
                 /* Event queue priority mapping */
                 for (i = 0; queue_priority_mapping[i][0] != -1; i++)
                         assign_priority_to_queue(j,
@@ -1762,7 +1757,7 @@ static int edma_probe(struct platform_device *pdev)
                 if (edma_read(j, EDMA_CCCFG) & CHMAP_EXIST)
                         map_dmach_param(j);
 
-                for (i = 0; i < info[j]->n_region; i++) {
+                for (i = 0; i < edma_cc[j]->num_region; i++) {
                         edma_write_array2(j, EDMA_DRAE, i, 0, 0x0);
                         edma_write_array2(j, EDMA_DRAE, i, 1, 0x0);
                         edma_write_array(j, EDMA_QRAE, i, 0x0);
diff --git a/arch/arm/common/mcpm_entry.c b/arch/arm/common/mcpm_entry.c
index 86fd60fefbc9..f91136ab447e 100644
--- a/arch/arm/common/mcpm_entry.c
+++ b/arch/arm/common/mcpm_entry.c
@@ -106,14 +106,14 @@ void mcpm_cpu_power_down(void)
         BUG();
 }
 
-int mcpm_cpu_power_down_finish(unsigned int cpu, unsigned int cluster)
+int mcpm_wait_for_cpu_powerdown(unsigned int cpu, unsigned int cluster)
 {
         int ret;
 
-        if (WARN_ON_ONCE(!platform_ops || !platform_ops->power_down_finish))
+        if (WARN_ON_ONCE(!platform_ops || !platform_ops->wait_for_powerdown))
                 return -EUNATCH;
 
-        ret = platform_ops->power_down_finish(cpu, cluster);
+        ret = platform_ops->wait_for_powerdown(cpu, cluster);
         if (ret)
                 pr_warn("%s: cpu %u, cluster %u failed to power down (%d)\n",
                         __func__, cpu, cluster, ret);
diff --git a/arch/arm/common/mcpm_platsmp.c b/arch/arm/common/mcpm_platsmp.c
index 177251a4dd9a..92e54d7c6f46 100644
--- a/arch/arm/common/mcpm_platsmp.c
+++ b/arch/arm/common/mcpm_platsmp.c
@@ -62,7 +62,7 @@ static int mcpm_cpu_kill(unsigned int cpu)
 
         cpu_to_pcpu(cpu, &pcpu, &pcluster);
 
-        return !mcpm_cpu_power_down_finish(pcpu, pcluster);
+        return !mcpm_wait_for_cpu_powerdown(pcpu, pcluster);
 }
 
 static int mcpm_cpu_disable(unsigned int cpu)
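
The GET_NUM_* macros added at the top of edma.c extract log2-encoded fields from the CCCFG register, and edma_setup_from_hw() turns them into region, channel, slot and TC counts. Below is a minimal standalone sketch (not part of the patch) of that decoding; the value 0x00223344 is made up for illustration, real values come from reading EDMA_CCCFG on the controller.

/* Standalone sketch: decode a hypothetical CCCFG value the same way
 * edma_setup_from_hw() does. Builds with any C compiler. */
#include <stdio.h>

#define GET_NUM_DMACH(x)	((x) & 0x7)		/* bits 0-2 */
#define GET_NUM_PAENTRY(x)	(((x) & 0x7000) >> 12)	/* bits 12-14 */
#define GET_NUM_EVQUE(x)	(((x) & 0x70000) >> 16)	/* bits 16-18 */
#define GET_NUM_REGN(x)		(((x) & 0x300000) >> 20)	/* bits 20-21 */

int main(void)
{
	unsigned int cccfg = 0x00223344;	/* hypothetical register value */

	/* Same derivation as edma_setup_from_hw() in the patch */
	printf("num_region:   %u\n", 1u << GET_NUM_REGN(cccfg));		/* 4 */
	printf("num_channels: %u\n", 1u << (GET_NUM_DMACH(cccfg) + 1));		/* 32 */
	printf("num_slots:    %u\n", 1u << (GET_NUM_PAENTRY(cccfg) + 4));	/* 128 */
	printf("num_tc:       %u\n", GET_NUM_EVQUE(cccfg) + 1);			/* 3 */
	return 0;
}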
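
edma_get_position() now returns a single dma_addr_t instead of filling two pointers, which fits residue reporting for the cyclic transfers enabled by the new DMA_CYCLIC capability. A hedged sketch of how a caller might use it: the my_cyclic_desc structure, its fields and the header location are assumptions, only edma_get_position(slot, dst) comes from the change above.

#include <linux/types.h>
#include <linux/platform_data/edma.h>	/* assumed location of the prototype */

/* Hypothetical per-transfer bookkeeping a dmaengine-style user might keep */
struct my_cyclic_desc {
	unsigned int slot;	/* active PaRAM slot */
	dma_addr_t buf_addr;	/* start of the cyclic buffer */
	size_t buf_len;		/* total buffer length in bytes */
	bool dst_is_memory;	/* true when memory is the destination (RX) */
};

static size_t my_cyclic_residue(const struct my_cyclic_desc *d)
{
	/* Read whichever side of the transfer walks through memory */
	dma_addr_t pos = edma_get_position(d->slot, d->dst_is_memory);
	size_t done = (size_t)(pos - d->buf_addr) % d->buf_len;

	return d->buf_len - done;
}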
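
In the non-DT probe path both interrupts are now looked up by name ("edma%d" and "edma%d_err"). A sketch of how a board file's IRQ resources would be named so platform_get_irq_byname() finds them; the IRQ numbers below are placeholders, not real hardware values.

#include <linux/ioport.h>

static struct resource edma_resources[] = {
	{
		.name	= "edma0",		/* matched by "edma%d" for j == 0 */
		.start	= 12,			/* placeholder IRQ number */
		.end	= 12,
		.flags	= IORESOURCE_IRQ,
	},
	{
		.name	= "edma0_err",		/* matched by "edma%d_err" */
		.start	= 13,			/* placeholder IRQ number */
		.end	= 13,
		.flags	= IORESOURCE_IRQ,
	},
	/* memory resource(s) for the CC register space would follow */
};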
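
The MCPM part is a pure rename: mcpm_cpu_power_down_finish() becomes mcpm_wait_for_cpu_powerdown() and the backend callback power_down_finish becomes wait_for_powerdown. A sketch of what a platform backend looks like after the rename; the my_pm_* functions are hypothetical, only the struct member names and mcpm_platform_register() come from the MCPM layer.

#include <linux/init.h>
#include <asm/mcpm.h>

static int my_pm_power_up(unsigned int cpu, unsigned int cluster)
{
	/* platform specific: power the CPU (and cluster if needed) back on */
	return 0;
}

static void my_pm_power_down(void)
{
	/* platform specific: prepare the calling CPU to be powered off */
}

static int my_pm_wait_for_powerdown(unsigned int cpu, unsigned int cluster)
{
	/* platform specific: poll the power controller until the CPU is off */
	return 0;
}

static const struct mcpm_platform_ops my_pm_power_ops = {
	.power_up		= my_pm_power_up,
	.power_down		= my_pm_power_down,
	.wait_for_powerdown	= my_pm_wait_for_powerdown,
};

static int __init my_pm_mcpm_init(void)
{
	/* registered once at boot from the platform's MCPM init code */
	return mcpm_platform_register(&my_pm_power_ops);
}
early_initcall(my_pm_mcpm_init);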