author | Linus Torvalds <torvalds@linux-foundation.org> | 2012-01-09 12:59:24 -0800 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2012-01-09 12:59:24 -0800 |
commit | db0c2bf69aa095d4a6de7b1145f29fe9a7c0f6a3 | |
tree | 8f38957c01b18edddd44d49ecc3beeac08a20b4e /block | |
parent | ac69e0928054ff29a5049902fb477f9c7605c773 | |
parent | 0d19ea866562e46989412a0676412fa0983c9ce7 | |
Merge branch 'for-3.3' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/cgroup
* 'for-3.3' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/cgroup: (21 commits)
cgroup: fix to allow mounting a hierarchy by name
cgroup: move assignement out of condition in cgroup_attach_proc()
cgroup: Remove task_lock() from cgroup_post_fork()
cgroup: add sparse annotation to cgroup_iter_start() and cgroup_iter_end()
cgroup: mark cgroup_rmdir_waitq and cgroup_attach_proc() as static
cgroup: only need to check oldcgrp==newgrp once
cgroup: remove redundant get/put of task struct
cgroup: remove redundant get/put of old css_set from migrate
cgroup: Remove unnecessary task_lock before fetching css_set on migration
cgroup: Drop task_lock(parent) on cgroup_fork()
cgroups: remove redundant get/put of css_set from css_set_check_fetched()
resource cgroups: remove bogus cast
cgroup: kill subsys->can_attach_task(), pre_attach() and attach_task()
cgroup, cpuset: don't use ss->pre_attach()
cgroup: don't use subsys->can_attach_task() or ->attach_task()
cgroup: introduce cgroup_taskset and use it in subsys->can_attach(), cancel_attach() and attach()
cgroup: improve old cgroup handling in cgroup_attach_proc()
cgroup: always lock threadgroup during migration
threadgroup: extend threadgroup_lock() to cover exit and exec
threadgroup: rename signal->threadgroup_fork_lock to ->group_rwsem
...
Fix up conflict in kernel/cgroup.c due to commit e0197aae59e5: "cgroups:
fix a css_set not found bug in cgroup_attach_proc" that already
mentioned that the bug is fixed (differently) in Tejun's cgroup
patchset. This one, in other words.
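For readers unfamiliar with the taskset conversion referenced in the commit list above (killing ->can_attach_task(), ->pre_attach() and ->attach_task(), and passing a struct cgroup_taskset to ->can_attach() and ->attach()), here is a minimal sketch of what a converted subsystem callback pair looks like under that API. example_can_attach(), example_attach() and example_task_allowed() are hypothetical names used only for illustration; the iteration pattern simply mirrors the blk-cgroup conversion shown in the diff below.

```c
#include <linux/cgroup.h>
#include <linux/sched.h>

/*
 * Illustrative sketch only: example_* are hypothetical names, not part of
 * this commit.  The control flow mirrors the blk-cgroup change below.
 */
static bool example_task_allowed(struct task_struct *task)
{
	/* placeholder per-task policy; a real subsystem checks its own state */
	return !(task->flags & PF_EXITING);
}

static int example_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
			      struct cgroup_taskset *tset)
{
	struct task_struct *task;

	/*
	 * Walk every task being migrated; cgrp is passed as the skip-cgroup,
	 * matching the blk-cgroup usage below.  Returning an error vetoes
	 * the whole migration.
	 */
	cgroup_taskset_for_each(task, cgrp, tset) {
		if (!example_task_allowed(task))
			return -EINVAL;
	}
	return 0;
}

static void example_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
			   struct cgroup_taskset *tset)
{
	struct task_struct *task;

	/* migration is committed at this point; apply per-task state */
	cgroup_taskset_for_each(task, cgrp, tset) {
		/* per-task bookkeeping would go here */
	}
}
```

A subsystem wires these callbacks up through the .can_attach and .attach members of its struct cgroup_subsys, which is exactly the change made to blkio_subsys in the diff below.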
Diffstat (limited to 'block')
-rw-r--r-- | block/blk-cgroup.c | 45 |
1 file changed, 28 insertions, 17 deletions
```diff
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 8f630cec906e..b8c143d68ee0 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -30,8 +30,10 @@ EXPORT_SYMBOL_GPL(blkio_root_cgroup);
 
 static struct cgroup_subsys_state *blkiocg_create(struct cgroup_subsys *,
 						  struct cgroup *);
-static int blkiocg_can_attach_task(struct cgroup *, struct task_struct *);
-static void blkiocg_attach_task(struct cgroup *, struct task_struct *);
+static int blkiocg_can_attach(struct cgroup_subsys *, struct cgroup *,
+			      struct cgroup_taskset *);
+static void blkiocg_attach(struct cgroup_subsys *, struct cgroup *,
+			   struct cgroup_taskset *);
 static void blkiocg_destroy(struct cgroup_subsys *, struct cgroup *);
 static int blkiocg_populate(struct cgroup_subsys *, struct cgroup *);
 
@@ -44,8 +46,8 @@ static int blkiocg_populate(struct cgroup_subsys *, struct cgroup *);
 struct cgroup_subsys blkio_subsys = {
 	.name = "blkio",
 	.create = blkiocg_create,
-	.can_attach_task = blkiocg_can_attach_task,
-	.attach_task = blkiocg_attach_task,
+	.can_attach = blkiocg_can_attach,
+	.attach = blkiocg_attach,
 	.destroy = blkiocg_destroy,
 	.populate = blkiocg_populate,
 #ifdef CONFIG_BLK_CGROUP
@@ -1626,30 +1628,39 @@ done:
  * of the main cic data structures. For now we allow a task to change
  * its cgroup only if it's the only owner of its ioc.
  */
-static int blkiocg_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
+static int blkiocg_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
+			      struct cgroup_taskset *tset)
 {
+	struct task_struct *task;
 	struct io_context *ioc;
 	int ret = 0;
 
 	/* task_lock() is needed to avoid races with exit_io_context() */
-	task_lock(tsk);
-	ioc = tsk->io_context;
-	if (ioc && atomic_read(&ioc->nr_tasks) > 1)
-		ret = -EINVAL;
-	task_unlock(tsk);
-
+	cgroup_taskset_for_each(task, cgrp, tset) {
+		task_lock(task);
+		ioc = task->io_context;
+		if (ioc && atomic_read(&ioc->nr_tasks) > 1)
+			ret = -EINVAL;
+		task_unlock(task);
+		if (ret)
+			break;
+	}
 	return ret;
 }
 
-static void blkiocg_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
+static void blkiocg_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
+			   struct cgroup_taskset *tset)
 {
+	struct task_struct *task;
 	struct io_context *ioc;
 
-	task_lock(tsk);
-	ioc = tsk->io_context;
-	if (ioc)
-		ioc->cgroup_changed = 1;
-	task_unlock(tsk);
+	cgroup_taskset_for_each(task, cgrp, tset) {
+		task_lock(task);
+		ioc = task->io_context;
+		if (ioc)
+			ioc->cgroup_changed = 1;
+		task_unlock(task);
+	}
 }
 
 void blkio_policy_register(struct blkio_policy_type *blkiop)
```