path: root/fs/notify/mark.c
author	Elena Reshetova <elena.reshetova@intel.com>	2017-10-20 13:26:02 +0300
committer	Jan Kara <jack@suse.cz>	2017-10-31 17:54:56 +0100
commit	ab97f87325e28b7ef7717e6cb62e8da14a7176e1 (patch)
tree	7ce8f0a747efaff1907f3774479dcbc2f4762c48 /fs/notify/mark.c
parent	6685df31255493c3f0e9e0b8bf885e4c9762fc5d (diff)
fsnotify: convert fsnotify_mark.refcnt from atomic_t to refcount_t
atomic_t variables are currently used to implement reference counters
with the following properties:
 - counter is initialized to 1 using atomic_set()
 - a resource is freed upon counter reaching zero
 - once counter reaches zero, its further increments aren't allowed
 - counter schema uses basic atomic operations (set, inc, inc_not_zero,
   dec_and_test, etc.)

Such atomic variables should be converted to a newly provided refcount_t
type and API that prevents accidental counter overflows and underflows.
This is important since overflows and underflows can lead to
use-after-free situations and be exploitable.

The variable fsnotify_mark.refcnt is used as a pure reference counter.
Convert it to refcount_t and fix up the operations.

Suggested-by: Kees Cook <keescook@chromium.org>
Reviewed-by: David Windsor <dwindsor@gmail.com>
Reviewed-by: Hans Liljestrand <ishkamiel@gmail.com>
Signed-off-by: Elena Reshetova <elena.reshetova@intel.com>
Signed-off-by: Jan Kara <jack@suse.cz>
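As a rough sketch of the conversion pattern described above (not part of this
patch), a hypothetical kernel object managed with the refcount_t API could
look like the following; the my_obj names are invented purely for
illustration:

/*
 * Minimal sketch (not from this patch): a hypothetical object whose
 * lifetime follows the scheme above. The names my_obj, my_obj_alloc,
 * my_obj_get and my_obj_put are illustrative, not kernel APIs.
 */
#include <linux/refcount.h>
#include <linux/slab.h>

struct my_obj {
	refcount_t refcnt;
};

static struct my_obj *my_obj_alloc(void)
{
	struct my_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (obj)
		refcount_set(&obj->refcnt, 1);	/* counter starts at 1 */
	return obj;
}

static void my_obj_get(struct my_obj *obj)
{
	/* refcount_inc() WARNs and saturates instead of silently overflowing */
	refcount_inc(&obj->refcnt);
}

static void my_obj_put(struct my_obj *obj)
{
	/* free the resource once the last reference is dropped */
	if (refcount_dec_and_test(&obj->refcnt))
		kfree(obj);
}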
Diffstat (limited to 'fs/notify/mark.c')
-rw-r--r--	fs/notify/mark.c	14
1 file changed, 7 insertions, 7 deletions
diff --git a/fs/notify/mark.c b/fs/notify/mark.c
index f3a32ea15b49..e9191b416434 100644
--- a/fs/notify/mark.c
+++ b/fs/notify/mark.c
@@ -105,8 +105,8 @@ static DECLARE_WORK(connector_reaper_work, fsnotify_connector_destroy_workfn);
void fsnotify_get_mark(struct fsnotify_mark *mark)
{
- WARN_ON_ONCE(!atomic_read(&mark->refcnt));
- atomic_inc(&mark->refcnt);
+ WARN_ON_ONCE(!refcount_read(&mark->refcnt));
+ refcount_inc(&mark->refcnt);
}
static void __fsnotify_recalc_mask(struct fsnotify_mark_connector *conn)
@@ -201,7 +201,7 @@ void fsnotify_put_mark(struct fsnotify_mark *mark)
/* Catch marks that were actually never attached to object */
if (!mark->connector) {
- if (atomic_dec_and_test(&mark->refcnt))
+ if (refcount_dec_and_test(&mark->refcnt))
fsnotify_final_mark_destroy(mark);
return;
}
@@ -210,7 +210,7 @@ void fsnotify_put_mark(struct fsnotify_mark *mark)
* We have to be careful so that traversals of obj_list under lock can
* safely grab mark reference.
*/
- if (!atomic_dec_and_lock(&mark->refcnt, &mark->connector->lock))
+ if (!refcount_dec_and_lock(&mark->refcnt, &mark->connector->lock))
return;
conn = mark->connector;
@@ -258,7 +258,7 @@ static bool fsnotify_get_mark_safe(struct fsnotify_mark *mark)
if (!mark)
return true;
- if (atomic_inc_not_zero(&mark->refcnt)) {
+ if (refcount_inc_not_zero(&mark->refcnt)) {
spin_lock(&mark->lock);
if (mark->flags & FSNOTIFY_MARK_FLAG_ATTACHED) {
/* mark is attached, group is still alive then */
@@ -335,7 +335,7 @@ void fsnotify_detach_mark(struct fsnotify_mark *mark)
WARN_ON_ONCE(!mutex_is_locked(&group->mark_mutex));
WARN_ON_ONCE(!srcu_read_lock_held(&fsnotify_mark_srcu) &&
- atomic_read(&mark->refcnt) < 1 +
+ refcount_read(&mark->refcnt) < 1 +
!!(mark->flags & FSNOTIFY_MARK_FLAG_ATTACHED));
spin_lock(&mark->lock);
@@ -737,7 +737,7 @@ void fsnotify_init_mark(struct fsnotify_mark *mark,
{
memset(mark, 0, sizeof(*mark));
spin_lock_init(&mark->lock);
- atomic_set(&mark->refcnt, 1);
+ refcount_set(&mark->refcnt, 1);
fsnotify_get_group(group);
mark->group = group;
}