author    Chris Mason <clm@fb.com>    2017-01-11 06:26:12 -0800
committer Chris Mason <clm@fb.com>    2017-01-11 06:26:12 -0800
commit    0bf70aebf12d8fa0d06967b72ca4b257eb6adf06 (patch)
tree      13f6063275339627603ed85ae25d5898bf5981a7 /fs/btrfs
parent    3dda13a8ad787f3d4c4f18c8c05f8eebc7ea135a (diff)
parent    562a7a07bf61e2949f7cbdb6ac7537ad9e2794d1 (diff)
Merge branch 'tracepoint-updates-4.10' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux into for-linus-4.10
Diffstat (limited to 'fs/btrfs')
-rw-r--r--  fs/btrfs/async-thread.c  15
-rw-r--r--  fs/btrfs/inode.c          2
2 files changed, 12 insertions, 5 deletions
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
index 63d197724519..ff0b0be92d61 100644
--- a/fs/btrfs/async-thread.c
+++ b/fs/btrfs/async-thread.c
@@ -273,6 +273,8 @@ static void run_ordered_work(struct __btrfs_workqueue *wq)
        unsigned long flags;

        while (1) {
+               void *wtag;
+
                spin_lock_irqsave(lock, flags);
                if (list_empty(list))
                        break;
@@ -299,11 +301,13 @@ static void run_ordered_work(struct __btrfs_workqueue *wq)
                spin_unlock_irqrestore(lock, flags);

                /*
-                * we don't want to call the ordered free functions
-                * with the lock held though
+                * We don't want to call the ordered free functions with the
+                * lock held though. Save the work as tag for the trace event,
+                * because the callback could free the structure.
                 */
+               wtag = work;
                work->ordered_free(work);
-               trace_btrfs_all_work_done(work);
+               trace_btrfs_all_work_done(wq->fs_info, wtag);
        }
        spin_unlock_irqrestore(lock, flags);
}
@@ -311,6 +315,7 @@ static void run_ordered_work(struct __btrfs_workqueue *wq)
static void normal_work_helper(struct btrfs_work *work)
{
        struct __btrfs_workqueue *wq;
+       void *wtag;
        int need_order = 0;

        /*
@@ -324,6 +329,8 @@ static void normal_work_helper(struct btrfs_work *work)
        if (work->ordered_func)
                need_order = 1;
        wq = work->wq;
+       /* Safe for tracepoints in case work gets freed by the callback */
+       wtag = work;
        trace_btrfs_work_sched(work);
        thresh_exec_hook(wq);
@@ -333,7 +340,7 @@ static void normal_work_helper(struct btrfs_work *work)
                run_ordered_work(wq);
        }
        if (!need_order)
-               trace_btrfs_all_work_done(work);
+               trace_btrfs_all_work_done(wq->fs_info, wtag);
}

void btrfs_init_work(struct btrfs_work *work, btrfs_work_func_t uniq_func,
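The comments added in the hunks above boil down to one rule: once ordered_free() runs, the btrfs_work structure may already be freed, so the tracepoint must only be handed the pointer value that was saved beforehand and must never dereference it. Below is a minimal userspace C sketch of that pattern, not the kernel code; my_work, ordered_free_cb and trace_work_done are invented names used only for illustration.

/*
 * Sketch of the "save the pointer as an opaque tag before the callback that
 * may free it" pattern. The trace helper receives only the saved value and
 * never dereferences it, so it is safe even after the object is gone.
 */
#include <stdio.h>
#include <stdlib.h>

struct my_work {
        void (*ordered_free)(struct my_work *work);
        int id;
};

static void ordered_free_cb(struct my_work *work)
{
        free(work);             /* after this, 'work' must not be dereferenced */
}

/* The "tracepoint" only formats the pointer value; it never touches *tag. */
static void trace_work_done(const char *fsname, const void *tag)
{
        printf("%s: all work done for tag %p\n", fsname, tag);
}

int main(void)
{
        struct my_work *work = malloc(sizeof(*work));

        if (!work)
                return 1;
        work->id = 1;
        work->ordered_free = ordered_free_cb;

        void *wtag = work;              /* capture the value before it can be freed */
        work->ordered_free(work);       /* may free 'work' */
        trace_work_done("btrfs-demo", wtag);    /* safe: uses only the saved tag */

        return 0;
}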
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 81b9d9d0450c..128e52489f81 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -7059,7 +7059,7 @@ insert:
        write_unlock(&em_tree->lock);
out:

-       trace_btrfs_get_extent(root, em);
+       trace_btrfs_get_extent(root, inode, em);

        btrfs_free_path(path);
        if (trans) {
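The inode.c hunk only widens the tracepoint call so the event receives the inode explicitly instead of having to derive it from the extent map later. A tiny standalone C sketch of that calling convention follows; the demo_* types and trace_get_extent are made up for illustration and are not the kernel's trace_btrfs_get_extent definition.

/*
 * Sketch: pass the inode to the trace helper alongside root and extent map,
 * mirroring the extra argument in trace_btrfs_get_extent(root, inode, em).
 */
#include <stdio.h>

struct demo_root       { unsigned long long objectid; };
struct demo_inode      { unsigned long long ino; };
struct demo_extent_map { unsigned long long start, len; };

static void trace_get_extent(const struct demo_root *root,
                             const struct demo_inode *inode,
                             const struct demo_extent_map *em)
{
        printf("root=%llu ino=%llu start=%llu len=%llu\n",
               root->objectid, inode->ino, em->start, em->len);
}

int main(void)
{
        struct demo_root root = { .objectid = 5 };
        struct demo_inode inode = { .ino = 257 };
        struct demo_extent_map em = { .start = 0, .len = 4096 };

        /* The event now knows which inode the lookup was for. */
        trace_get_extent(&root, &inode, &em);
        return 0;
}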