author     Rafael J. Wysocki <rafael.j.wysocki@intel.com>   2020-12-14 20:26:17 +0100
committer  Rafael J. Wysocki <rafael.j.wysocki@intel.com>   2020-12-14 20:26:17 +0100
commit     f0f6dbaf06f4329dbd07594ffcd55edf27ee4b45
tree       b8c70358f5693ec89db9974c493cb3e8bf976f36   /drivers/opp
parent     0477e92881850d44910a7e94fc2c46f96faa131f
parent     2c07b0fd9bf6dfb0bdf05aac018e6b3242d60822
Merge branch 'opp/linux-next' of git://git.kernel.org/pub/scm/linux/kernel/git/vireshk/pm
Pull OPP (Operating Performance Points) updates for 5.11-rc1 from
Viresh Kumar:
"This contains the following updates:
- Allow empty (node-less) OPP tables in DT, used to pass only the
  dependency-related information (Nicola Mazzucato).
- Fix a potential lockdep warning in the OPP core, plus other OPP core
  cleanups (Viresh Kumar).
- Don't abuse dev_pm_opp_get_opp_table() to create an OPP table, and fix
  the cpufreq-dt driver accordingly (Viresh Kumar).
- dev_pm_opp_put_regulators() now accepts a NULL argument, with all of
  its users updated as well (Viresh Kumar)."
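As an aside on that last point, here is a minimal sketch of the consumer-side effect, for a hypothetical driver (the foo_* names and the "vdd" supply are invented; dev_pm_opp_set_regulators() and dev_pm_opp_put_regulators() are the real APIs being discussed):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/pm_opp.h>

/* Hypothetical driver state; only the OPP-related members matter here. */
struct foo_drv {
	struct device *dev;
	struct opp_table *opp_table;	/* may legitimately stay NULL */
};

static int foo_opp_init(struct foo_drv *fd)
{
	static const char * const reg_names[] = { "vdd" };
	struct opp_table *opp_table;

	opp_table = dev_pm_opp_set_regulators(fd->dev, reg_names,
					       ARRAY_SIZE(reg_names));
	if (IS_ERR(opp_table))
		return PTR_ERR(opp_table);

	fd->opp_table = opp_table;
	return 0;
}

static void foo_opp_exit(struct foo_drv *fd)
{
	/*
	 * With this series, passing NULL is a no-op, so the unwind path no
	 * longer needs an "if (fd->opp_table)" guard before dropping the
	 * reference taken by dev_pm_opp_set_regulators().
	 */
	dev_pm_opp_put_regulators(fd->opp_table);
	fd->opp_table = NULL;
}

The only point of the sketch is the unwind path: error and remove paths no longer need to remember whether the regulators were ever set.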
* 'opp/linux-next' of git://git.kernel.org/pub/scm/linux/kernel/git/vireshk/pm:
opp: of: Allow empty opp-table with opp-shared
dt-bindings: opp: Allow empty OPP tables
media: venus: dev_pm_opp_put_*() accepts NULL argument
drm/panfrost: dev_pm_opp_put_*() accepts NULL argument
drm/lima: dev_pm_opp_put_*() accepts NULL argument
PM / devfreq: exynos: dev_pm_opp_put_*() accepts NULL argument
cpufreq: qcom-cpufreq-nvmem: dev_pm_opp_put_*() accepts NULL argument
cpufreq: dt: dev_pm_opp_put_regulators() accepts NULL argument
opp: Allow dev_pm_opp_put_*() APIs to accept NULL opp_table
opp: Don't create an OPP table from dev_pm_opp_get_opp_table()
cpufreq: dt: Don't (ab)use dev_pm_opp_get_opp_table() to create OPP table
opp: Reduce the size of critical section in _opp_kref_release()
opp: Don't return opp_dev from _find_opp_dev()
opp: Allocate the OPP table outside of opp_table_lock
opp: Always add entries in dev_list with opp_table->lock held
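To make the dev_pm_opp_get_opp_table() change listed above concrete: after this series it only looks up (and takes a reference on) an existing table instead of creating one. A hedged sketch of the resulting calling convention, with an invented foo_get_opp_table() helper around the real dev_pm_opp_* calls:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/pm_opp.h>

/*
 * dev_pm_opp_get_opp_table() no longer allocates a table for the device;
 * it only returns a reference to one that already exists. A caller that
 * previously relied on it as "create if missing" now populates the table
 * first (here from DT) and only then takes its reference.
 */
static struct opp_table *foo_get_opp_table(struct device *dev)
{
	struct opp_table *opp_table;
	int ret;

	ret = dev_pm_opp_of_add_table(dev);	/* creates the table from DT */
	if (ret)
		return ERR_PTR(ret);

	opp_table = dev_pm_opp_get_opp_table(dev);	/* lookup + kref only */
	if (IS_ERR(opp_table))
		dev_pm_opp_of_remove_table(dev);

	return opp_table;
}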
Diffstat (limited to 'drivers/opp')
-rw-r--r--   drivers/opp/core.c   228
-rw-r--r--   drivers/opp/of.c      16
-rw-r--r--   drivers/opp/opp.h      1
3 files changed, 146 insertions, 99 deletions
diff --git a/drivers/opp/core.c b/drivers/opp/core.c
index 0e0a5269dc82..4268eb359915 100644
--- a/drivers/opp/core.c
+++ b/drivers/opp/core.c
@@ -29,32 +29,32 @@ LIST_HEAD(opp_tables);
 /* Lock to allow exclusive modification to the device and opp lists */
 DEFINE_MUTEX(opp_table_lock);
+/* Flag indicating that opp_tables list is being updated at the moment */
+static bool opp_tables_busy;
 
-static struct opp_device *_find_opp_dev(const struct device *dev,
-					struct opp_table *opp_table)
+static bool _find_opp_dev(const struct device *dev, struct opp_table *opp_table)
 {
 	struct opp_device *opp_dev;
+	bool found = false;
 
+	mutex_lock(&opp_table->lock);
 	list_for_each_entry(opp_dev, &opp_table->dev_list, node)
-		if (opp_dev->dev == dev)
-			return opp_dev;
+		if (opp_dev->dev == dev) {
+			found = true;
+			break;
+		}
 
-	return NULL;
+	mutex_unlock(&opp_table->lock);
+	return found;
 }
 
 static struct opp_table *_find_opp_table_unlocked(struct device *dev)
 {
 	struct opp_table *opp_table;
-	bool found;
 
 	list_for_each_entry(opp_table, &opp_tables, node) {
-		mutex_lock(&opp_table->lock);
-		found = !!_find_opp_dev(dev, opp_table);
-		mutex_unlock(&opp_table->lock);
-
-		if (found) {
+		if (_find_opp_dev(dev, opp_table)) {
 			_get_opp_table_kref(opp_table);
-
 			return opp_table;
 		}
 	}
@@ -1036,8 +1036,8 @@ static void _remove_opp_dev(struct opp_device *opp_dev,
 	kfree(opp_dev);
 }
 
-static struct opp_device *_add_opp_dev_unlocked(const struct device *dev,
-						struct opp_table *opp_table)
+struct opp_device *_add_opp_dev(const struct device *dev,
+				struct opp_table *opp_table)
 {
 	struct opp_device *opp_dev;
 
@@ -1048,7 +1048,9 @@ static struct opp_device *_add_opp_dev_unlocked(const struct device *dev,
 	/* Initialize opp-dev */
 	opp_dev->dev = dev;
 
+	mutex_lock(&opp_table->lock);
 	list_add(&opp_dev->node, &opp_table->dev_list);
+	mutex_unlock(&opp_table->lock);
 
 	/* Create debugfs entries for the opp_table */
 	opp_debug_register(opp_dev, opp_table);
@@ -1056,18 +1058,6 @@ static struct opp_device *_add_opp_dev_unlocked(const struct device *dev,
 	return opp_dev;
 }
 
-struct opp_device *_add_opp_dev(const struct device *dev,
-				struct opp_table *opp_table)
-{
-	struct opp_device *opp_dev;
-
-	mutex_lock(&opp_table->lock);
-	opp_dev = _add_opp_dev_unlocked(dev, opp_table);
-	mutex_unlock(&opp_table->lock);
-
-	return opp_dev;
-}
-
 static struct opp_table *_allocate_opp_table(struct device *dev, int index)
 {
 	struct opp_table *opp_table;
@@ -1121,8 +1111,6 @@ static struct opp_table *_allocate_opp_table(struct device *dev, int index)
 	INIT_LIST_HEAD(&opp_table->opp_list);
 	kref_init(&opp_table->kref);
 
-	/* Secure the device table modification */
-	list_add(&opp_table->node, &opp_tables);
 	return opp_table;
 
 err:
@@ -1135,27 +1123,64 @@ void _get_opp_table_kref(struct opp_table *opp_table)
 	kref_get(&opp_table->kref);
 }
 
-static struct opp_table *_opp_get_opp_table(struct device *dev, int index)
+/*
+ * We need to make sure that the OPP table for a device doesn't get added twice,
+ * if this routine gets called in parallel with the same device pointer.
+ *
+ * The simplest way to enforce that is to perform everything (find existing
+ * table and if not found, create a new one) under the opp_table_lock, so only
+ * one creator gets access to the same. But that expands the critical section
+ * under the lock and may end up causing circular dependencies with frameworks
+ * like debugfs, interconnect or clock framework as they may be direct or
+ * indirect users of OPP core.
+ *
+ * And for that reason we have to go for a bit tricky implementation here, which
+ * uses the opp_tables_busy flag to indicate if another creator is in the middle
+ * of adding an OPP table and others should wait for it to finish.
+ */
+struct opp_table *_add_opp_table_indexed(struct device *dev, int index)
 {
 	struct opp_table *opp_table;
 
-	/* Hold our table modification lock here */
+again:
 	mutex_lock(&opp_table_lock);
 
 	opp_table = _find_opp_table_unlocked(dev);
 	if (!IS_ERR(opp_table))
 		goto unlock;
 
+	/*
+	 * The opp_tables list or an OPP table's dev_list is getting updated by
+	 * another user, wait for it to finish.
+	 */
+	if (unlikely(opp_tables_busy)) {
+		mutex_unlock(&opp_table_lock);
+		cpu_relax();
+		goto again;
+	}
+
+	opp_tables_busy = true;
 	opp_table = _managed_opp(dev, index);
+
+	/* Drop the lock to reduce the size of critical section */
+	mutex_unlock(&opp_table_lock);
+
 	if (opp_table) {
-		if (!_add_opp_dev_unlocked(dev, opp_table)) {
+		if (!_add_opp_dev(dev, opp_table)) {
 			dev_pm_opp_put_opp_table(opp_table);
 			opp_table = ERR_PTR(-ENOMEM);
 		}
-		goto unlock;
+
+		mutex_lock(&opp_table_lock);
+	} else {
+		opp_table = _allocate_opp_table(dev, index);
+
+		mutex_lock(&opp_table_lock);
+		if (!IS_ERR(opp_table))
+			list_add(&opp_table->node, &opp_tables);
 	}
 
-	opp_table = _allocate_opp_table(dev, index);
+	opp_tables_busy = false;
 
 unlock:
 	mutex_unlock(&opp_table_lock);
@@ -1163,17 +1188,16 @@ unlock:
 	return opp_table;
 }
 
-struct opp_table *dev_pm_opp_get_opp_table(struct device *dev)
+struct opp_table *_add_opp_table(struct device *dev)
 {
-	return _opp_get_opp_table(dev, 0);
+	return _add_opp_table_indexed(dev, 0);
 }
-EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_table);
 
-struct opp_table *dev_pm_opp_get_opp_table_indexed(struct device *dev,
-						   int index)
+struct opp_table *dev_pm_opp_get_opp_table(struct device *dev)
 {
-	return _opp_get_opp_table(dev, index);
+	return _find_opp_table(dev);
 }
+EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_table);
 
 static void _opp_table_kref_release(struct kref *kref)
 {
@@ -1227,9 +1251,14 @@ void _opp_free(struct dev_pm_opp *opp)
 	kfree(opp);
 }
 
-static void _opp_kref_release(struct dev_pm_opp *opp,
-			      struct opp_table *opp_table)
+static void _opp_kref_release(struct kref *kref)
 {
+	struct dev_pm_opp *opp = container_of(kref, struct dev_pm_opp, kref);
+	struct opp_table *opp_table = opp->opp_table;
+
+	list_del(&opp->node);
+	mutex_unlock(&opp_table->lock);
+
 	/*
 	 * Notify the changes in the availability of the operable
 	 * frequency/voltage list.
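The block comment and _add_opp_table_indexed() above are the heart of the locking rework. Reduced to a self-contained, generic sketch (the foo_* names are invented and this is not the OPP code itself, only the same busy-flag-plus-retry idea under the stated assumptions):

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/processor.h>	/* cpu_relax() */
#include <linux/slab.h>

/* Illustrative registry entry; stands in for struct opp_table. */
struct foo_entry {
	int key;
	struct list_head node;
};

static LIST_HEAD(foo_entries);
static DEFINE_MUTEX(foo_lock);		/* analogous to opp_table_lock */
static bool foo_busy;			/* analogous to opp_tables_busy */

/* Caller holds foo_lock. */
static struct foo_entry *foo_find_locked(int key)
{
	struct foo_entry *e;

	list_for_each_entry(e, &foo_entries, node)
		if (e->key == key)
			return e;
	return NULL;
}

/* Find-or-create without holding foo_lock across the (slow) allocation. */
static struct foo_entry *foo_get_entry(int key)
{
	struct foo_entry *e;

again:
	mutex_lock(&foo_lock);

	e = foo_find_locked(key);
	if (e)
		goto unlock;

	/* Another creator is mid-flight: drop the lock and retry. */
	if (foo_busy) {
		mutex_unlock(&foo_lock);
		cpu_relax();
		goto again;
	}

	foo_busy = true;
	mutex_unlock(&foo_lock);

	/* Slow path runs unlocked, so it may safely call other frameworks. */
	e = kzalloc(sizeof(*e), GFP_KERNEL);

	mutex_lock(&foo_lock);
	if (e) {
		e->key = key;
		list_add(&e->node, &foo_entries);
	}
	foo_busy = false;

unlock:
	mutex_unlock(&foo_lock);
	return e;
}

A racing caller either finds the entry already on the list, or sees the busy flag and retries, so duplicates are avoided without keeping the lock held across allocation.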
@@ -1237,27 +1266,9 @@ static void _opp_kref_release(struct dev_pm_opp *opp,
 	blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_REMOVE, opp);
 	_of_opp_free_required_opps(opp_table, opp);
 	opp_debug_remove_one(opp);
-	list_del(&opp->node);
 	kfree(opp);
 }
 
-static void _opp_kref_release_unlocked(struct kref *kref)
-{
-	struct dev_pm_opp *opp = container_of(kref, struct dev_pm_opp, kref);
-	struct opp_table *opp_table = opp->opp_table;
-
-	_opp_kref_release(opp, opp_table);
-}
-
-static void _opp_kref_release_locked(struct kref *kref)
-{
-	struct dev_pm_opp *opp = container_of(kref, struct dev_pm_opp, kref);
-	struct opp_table *opp_table = opp->opp_table;
-
-	_opp_kref_release(opp, opp_table);
-	mutex_unlock(&opp_table->lock);
-}
-
 void dev_pm_opp_get(struct dev_pm_opp *opp)
 {
 	kref_get(&opp->kref);
@@ -1265,16 +1276,10 @@ void dev_pm_opp_get(struct dev_pm_opp *opp)
 
 void dev_pm_opp_put(struct dev_pm_opp *opp)
 {
-	kref_put_mutex(&opp->kref, _opp_kref_release_locked,
-		       &opp->opp_table->lock);
+	kref_put_mutex(&opp->kref, _opp_kref_release, &opp->opp_table->lock);
 }
 EXPORT_SYMBOL_GPL(dev_pm_opp_put);
 
-static void dev_pm_opp_put_unlocked(struct dev_pm_opp *opp)
-{
-	kref_put(&opp->kref, _opp_kref_release_unlocked);
-}
-
 /**
  * dev_pm_opp_remove() - Remove an OPP from OPP table
  * @dev:	device for which we do this operation
@@ -1318,30 +1323,49 @@ void dev_pm_opp_remove(struct device *dev, unsigned long freq)
 }
 EXPORT_SYMBOL_GPL(dev_pm_opp_remove);
 
+static struct dev_pm_opp *_opp_get_next(struct opp_table *opp_table,
+					bool dynamic)
+{
+	struct dev_pm_opp *opp = NULL, *temp;
+
+	mutex_lock(&opp_table->lock);
+	list_for_each_entry(temp, &opp_table->opp_list, node) {
+		if (dynamic == temp->dynamic) {
+			opp = temp;
+			break;
+		}
+	}
+
+	mutex_unlock(&opp_table->lock);
+	return opp;
+}
+
 bool _opp_remove_all_static(struct opp_table *opp_table)
 {
-	struct dev_pm_opp *opp, *tmp;
-	bool ret = true;
+	struct dev_pm_opp *opp;
 
 	mutex_lock(&opp_table->lock);
 
 	if (!opp_table->parsed_static_opps) {
-		ret = false;
-		goto unlock;
+		mutex_unlock(&opp_table->lock);
+		return false;
 	}
 
-	if (--opp_table->parsed_static_opps)
-		goto unlock;
-
-	list_for_each_entry_safe(opp, tmp, &opp_table->opp_list, node) {
-		if (!opp->dynamic)
-			dev_pm_opp_put_unlocked(opp);
+	if (--opp_table->parsed_static_opps) {
+		mutex_unlock(&opp_table->lock);
+		return true;
 	}
 
-unlock:
 	mutex_unlock(&opp_table->lock);
 
-	return ret;
+	/*
+	 * Can't remove the OPP from under the lock, debugfs removal needs to
+	 * happen lock less to avoid circular dependency issues.
+	 */
+	while ((opp = _opp_get_next(opp_table, false)))
+		dev_pm_opp_put(opp);
+
+	return true;
 }
 
 /**
@@ -1353,21 +1377,21 @@ unlock:
 void dev_pm_opp_remove_all_dynamic(struct device *dev)
 {
 	struct opp_table *opp_table;
-	struct dev_pm_opp *opp, *temp;
+	struct dev_pm_opp *opp;
 	int count = 0;
 
 	opp_table = _find_opp_table(dev);
 	if (IS_ERR(opp_table))
 		return;
 
-	mutex_lock(&opp_table->lock);
-	list_for_each_entry_safe(opp, temp, &opp_table->opp_list, node) {
-		if (opp->dynamic) {
-			dev_pm_opp_put_unlocked(opp);
-			count++;
-		}
+	/*
+	 * Can't remove the OPP from under the lock, debugfs removal needs to
+	 * happen lock less to avoid circular dependency issues.
+	 */
+	while ((opp = _opp_get_next(opp_table, true))) {
+		dev_pm_opp_put(opp);
+		count++;
 	}
-	mutex_unlock(&opp_table->lock);
 
 	/* Drop the references taken by dev_pm_opp_add() */
 	while (count--)
@@ -1602,7 +1626,7 @@ struct opp_table *dev_pm_opp_set_supported_hw(struct device *dev,
 {
 	struct opp_table *opp_table;
 
-	opp_table = dev_pm_opp_get_opp_table(dev);
+	opp_table = _add_opp_table(dev);
 	if (IS_ERR(opp_table))
 		return opp_table;
 
@@ -1636,6 +1660,9 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_set_supported_hw);
  */
 void dev_pm_opp_put_supported_hw(struct opp_table *opp_table)
 {
+	if (unlikely(!opp_table))
+		return;
+
 	/* Make sure there are no concurrent readers while updating opp_table */
 	WARN_ON(!list_empty(&opp_table->opp_list));
 
@@ -1661,7 +1688,7 @@ struct opp_table *dev_pm_opp_set_prop_name(struct device *dev, const char *name)
 {
 	struct opp_table *opp_table;
 
-	opp_table = dev_pm_opp_get_opp_table(dev);
+	opp_table = _add_opp_table(dev);
 	if (IS_ERR(opp_table))
 		return opp_table;
 
@@ -1692,6 +1719,9 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_set_prop_name);
  */
 void dev_pm_opp_put_prop_name(struct opp_table *opp_table)
 {
+	if (unlikely(!opp_table))
+		return;
+
 	/* Make sure there are no concurrent readers while updating opp_table */
 	WARN_ON(!list_empty(&opp_table->opp_list));
 
@@ -1754,7 +1784,7 @@ struct opp_table *dev_pm_opp_set_regulators(struct device *dev,
 	struct regulator *reg;
 	int ret, i;
 
-	opp_table = dev_pm_opp_get_opp_table(dev);
+	opp_table = _add_opp_table(dev);
 	if (IS_ERR(opp_table))
 		return opp_table;
 
@@ -1820,6 +1850,9 @@ void dev_pm_opp_put_regulators(struct opp_table *opp_table)
 {
 	int i;
 
+	if (unlikely(!opp_table))
+		return;
+
 	if (!opp_table->regulators)
 		goto put_opp_table;
 
@@ -1862,7 +1895,7 @@ struct opp_table *dev_pm_opp_set_clkname(struct device *dev, const char *name)
 	struct opp_table *opp_table;
 	int ret;
 
-	opp_table = dev_pm_opp_get_opp_table(dev);
+	opp_table = _add_opp_table(dev);
 	if (IS_ERR(opp_table))
 		return opp_table;
 
@@ -1902,6 +1935,9 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_set_clkname);
  */
 void dev_pm_opp_put_clkname(struct opp_table *opp_table)
 {
+	if (unlikely(!opp_table))
+		return;
+
 	/* Make sure there are no concurrent readers while updating opp_table */
 	WARN_ON(!list_empty(&opp_table->opp_list));
 
@@ -1930,7 +1966,7 @@ struct opp_table *dev_pm_opp_register_set_opp_helper(struct device *dev,
 	if (!set_opp)
 		return ERR_PTR(-EINVAL);
 
-	opp_table = dev_pm_opp_get_opp_table(dev);
+	opp_table = _add_opp_table(dev);
 	if (IS_ERR(opp_table))
 		return opp_table;
 
@@ -1957,6 +1993,9 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_register_set_opp_helper);
  */
 void dev_pm_opp_unregister_set_opp_helper(struct opp_table *opp_table)
 {
+	if (unlikely(!opp_table))
+		return;
+
 	/* Make sure there are no concurrent readers while updating opp_table */
 	WARN_ON(!list_empty(&opp_table->opp_list));
 
@@ -2014,7 +2053,7 @@ struct opp_table *dev_pm_opp_attach_genpd(struct device *dev,
 	int index = 0, ret = -EINVAL;
 	const char **name = names;
 
-	opp_table = dev_pm_opp_get_opp_table(dev);
+	opp_table = _add_opp_table(dev);
 	if (IS_ERR(opp_table))
 		return opp_table;
 
@@ -2085,6 +2124,9 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_attach_genpd);
  */
 void dev_pm_opp_detach_genpd(struct opp_table *opp_table)
 {
+	if (unlikely(!opp_table))
+		return;
+
 	/*
 	 * Acquire genpd_virt_dev_lock to make sure virt_dev isn't getting
 	 * used in parallel.
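The _opp_get_next() loops used by _opp_remove_all_static() and dev_pm_opp_remove_all_dynamic() above boil down to "detach one element under the lock, release it with the lock dropped". A simplified, generic sketch of that pattern (foo_* names are invented; unlike the OPP code, the item here is unlinked and freed directly rather than kref-released):

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct foo_item {
	struct list_head node;
	bool dynamic;
};

static LIST_HEAD(foo_list);
static DEFINE_MUTEX(foo_list_lock);

/* Detach one matching item while holding the lock... */
static struct foo_item *foo_get_next(bool dynamic)
{
	struct foo_item *item = NULL, *temp;

	mutex_lock(&foo_list_lock);
	list_for_each_entry(temp, &foo_list, node) {
		if (temp->dynamic == dynamic) {
			item = temp;
			list_del(&temp->node);
			break;
		}
	}
	mutex_unlock(&foo_list_lock);

	return item;
}

/*
 * ...and release it with the lock dropped, so teardown work (debugfs
 * removal in the OPP case) cannot deadlock against this lock.
 */
static void foo_remove_all(bool dynamic)
{
	struct foo_item *item;

	while ((item = foo_get_next(dynamic)))
		kfree(item);
}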
@@ -2179,7 +2221,7 @@ int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
 	struct opp_table *opp_table;
 	int ret;
 
-	opp_table = dev_pm_opp_get_opp_table(dev);
+	opp_table = _add_opp_table(dev);
 	if (IS_ERR(opp_table))
 		return PTR_ERR(opp_table);
 
diff --git a/drivers/opp/of.c b/drivers/opp/of.c
index 9faeb83e4b32..d41088578aab 100644
--- a/drivers/opp/of.c
+++ b/drivers/opp/of.c
@@ -112,8 +112,6 @@ static struct opp_table *_find_table_of_opp_np(struct device_node *opp_np)
 	struct opp_table *opp_table;
 	struct device_node *opp_table_np;
 
-	lockdep_assert_held(&opp_table_lock);
-
 	opp_table_np = of_get_parent(opp_np);
 	if (!opp_table_np)
 		goto err;
@@ -121,12 +119,15 @@ static struct opp_table *_find_table_of_opp_np(struct device_node *opp_np)
 	/* It is safe to put the node now as all we need now is its address */
 	of_node_put(opp_table_np);
 
+	mutex_lock(&opp_table_lock);
 	list_for_each_entry(opp_table, &opp_tables, node) {
 		if (opp_table_np == opp_table->np) {
 			_get_opp_table_kref(opp_table);
+			mutex_unlock(&opp_table_lock);
 			return opp_table;
 		}
 	}
+	mutex_unlock(&opp_table_lock);
 
 err:
 	return ERR_PTR(-ENODEV);
@@ -169,7 +170,8 @@ static void _opp_table_alloc_required_tables(struct opp_table *opp_table,
 	/* Traversing the first OPP node is all we need */
 	np = of_get_next_available_child(opp_np, NULL);
 	if (!np) {
-		dev_err(dev, "Empty OPP table\n");
+		dev_warn(dev, "Empty OPP table\n");
+
 		return;
 	}
@@ -377,7 +379,9 @@ int dev_pm_opp_of_find_icc_paths(struct device *dev,
 	struct icc_path **paths;
 
 	ret = _bandwidth_supported(dev, opp_table);
-	if (ret <= 0)
+	if (ret == -EINVAL)
+		return 0; /* Empty OPP table is a valid corner-case, let's not fail */
+	else if (ret <= 0)
 		return ret;
 
 	ret = 0;
@@ -974,7 +978,7 @@ int dev_pm_opp_of_add_table(struct device *dev)
 	struct opp_table *opp_table;
 	int ret;
 
-	opp_table = dev_pm_opp_get_opp_table_indexed(dev, 0);
+	opp_table = _add_opp_table_indexed(dev, 0);
 	if (IS_ERR(opp_table))
 		return PTR_ERR(opp_table);
 
@@ -1029,7 +1033,7 @@ int dev_pm_opp_of_add_table_indexed(struct device *dev, int index)
 		index = 0;
 	}
 
-	opp_table = dev_pm_opp_get_opp_table_indexed(dev, index);
+	opp_table = _add_opp_table_indexed(dev, index);
 	if (IS_ERR(opp_table))
 		return PTR_ERR(opp_table);
 
diff --git a/drivers/opp/opp.h b/drivers/opp/opp.h
index ebd930e0b3ca..4ced7ffa8158 100644
--- a/drivers/opp/opp.h
+++ b/drivers/opp/opp.h
@@ -224,6 +224,7 @@ int _opp_add(struct device *dev, struct dev_pm_opp *new_opp, struct opp_table *o
 int _opp_add_v1(struct opp_table *opp_table, struct device *dev, unsigned long freq, long u_volt, bool dynamic);
 void _dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask, int last_cpu);
 struct opp_table *_add_opp_table(struct device *dev);
+struct opp_table *_add_opp_table_indexed(struct device *dev, int index);
 void _put_opp_list_kref(struct opp_table *opp_table);
 
 #ifdef CONFIG_OF
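Finally, on the empty-OPP-table support in the of.c and binding patches above: a table node may now carry only opp-shared, purely to say which devices scale together while the actual performance levels come from elsewhere (e.g. firmware). A hedged consumer-side sketch (foo_init_policy_cpus() is invented; dev_pm_opp_of_get_sharing_cpus() is existing API that reads the DT nodes and does not require any OPP entries):

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/pm_opp.h>

/*
 * Discover which CPUs share a performance domain. This works even when the
 * referenced opp-table node is empty apart from "opp-shared", which is
 * exactly the case the DT binding change above makes legal.
 */
static int foo_init_policy_cpus(unsigned int cpu, struct cpumask *policy_cpus)
{
	struct device *cpu_dev = get_cpu_device(cpu);
	int ret;

	if (!cpu_dev)
		return -ENODEV;

	ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, policy_cpus);
	if (ret)
		/* No opp-shared information: assume this CPU scales alone. */
		cpumask_set_cpu(cpu, policy_cpus);

	return 0;
}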