Diffstat (limited to 'block')
-rw-r--r-- | block/blk-cgroup.c   | 13
-rw-r--r-- | block/blk-tag.c      | 33
-rw-r--r-- | block/blk-throttle.c |  6
-rw-r--r-- | block/compat_ioctl.c |  1
4 files changed, 22 insertions, 31 deletions
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 28d227c5ca77..e17da947f6bd 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -928,7 +928,15 @@ struct cgroup_subsys blkio_cgrp_subsys = {
 	.css_offline = blkcg_css_offline,
 	.css_free = blkcg_css_free,
 	.can_attach = blkcg_can_attach,
-	.base_cftypes = blkcg_files,
+	.legacy_cftypes = blkcg_files,
+#ifdef CONFIG_MEMCG
+	/*
+	 * This ensures that, if available, memcg is automatically enabled
+	 * together on the default hierarchy so that the owner cgroup can
+	 * be retrieved from writeback pages.
+	 */
+	.depends_on = 1 << memory_cgrp_id,
+#endif
 };
 EXPORT_SYMBOL_GPL(blkio_cgrp_subsys);
 
@@ -1120,7 +1128,8 @@ int blkcg_policy_register(struct blkcg_policy *pol)
 
 	/* everything is in place, add intf files for the new policy */
 	if (pol->cftypes)
-		WARN_ON(cgroup_add_cftypes(&blkio_cgrp_subsys, pol->cftypes));
+		WARN_ON(cgroup_add_legacy_cftypes(&blkio_cgrp_subsys,
+						  pol->cftypes));
 	ret = 0;
 out_unlock:
 	mutex_unlock(&blkcg_pol_mutex);
diff --git a/block/blk-tag.c b/block/blk-tag.c
index 3f33d8672268..a185b86741e5 100644
--- a/block/blk-tag.c
+++ b/block/blk-tag.c
@@ -27,18 +27,15 @@ struct request *blk_queue_find_tag(struct request_queue *q, int tag)
 EXPORT_SYMBOL(blk_queue_find_tag);
 
 /**
- * __blk_free_tags - release a given set of tag maintenance info
+ * blk_free_tags - release a given set of tag maintenance info
  * @bqt:	the tag map to free
  *
- * Tries to free the specified @bqt.  Returns true if it was
- * actually freed and false if there are still references using it
+ * Drop the reference count on @bqt and frees it when the last reference
+ * is dropped.
  */
-static int __blk_free_tags(struct blk_queue_tag *bqt)
+void blk_free_tags(struct blk_queue_tag *bqt)
 {
-	int retval;
-
-	retval = atomic_dec_and_test(&bqt->refcnt);
-	if (retval) {
+	if (atomic_dec_and_test(&bqt->refcnt)) {
 		BUG_ON(find_first_bit(bqt->tag_map, bqt->max_depth) <
							bqt->max_depth);
 
@@ -50,9 +47,8 @@ static int __blk_free_tags(struct blk_queue_tag *bqt)
 
 		kfree(bqt);
 	}
-
-	return retval;
 }
+EXPORT_SYMBOL(blk_free_tags);
 
 /**
  * __blk_queue_free_tags - release tag maintenance info
@@ -69,28 +65,13 @@ void __blk_queue_free_tags(struct request_queue *q)
 	if (!bqt)
 		return;
 
-	__blk_free_tags(bqt);
+	blk_free_tags(bqt);
 
 	q->queue_tags = NULL;
 	queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q);
 }
 
 /**
- * blk_free_tags - release a given set of tag maintenance info
- * @bqt:	the tag map to free
- *
- * For externally managed @bqt frees the map.  Callers of this
- * function must guarantee to have released all the queues that
- * might have been using this tag map.
- */
-void blk_free_tags(struct blk_queue_tag *bqt)
-{
-	if (unlikely(!__blk_free_tags(bqt)))
-		BUG();
-}
-EXPORT_SYMBOL(blk_free_tags);
-
-/**
  * blk_queue_free_tags - release tag maintenance info
  * @q:	the request queue for the device
  *
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 3fdb21a390c1..9273d0969ebd 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -412,13 +412,13 @@ static void throtl_pd_init(struct blkcg_gq *blkg)
 	int rw;
 
 	/*
-	 * If sane_hierarchy is enabled, we switch to properly hierarchical
+	 * If on the default hierarchy, we switch to properly hierarchical
 	 * behavior where limits on a given throtl_grp are applied to the
 	 * whole subtree rather than just the group itself.  e.g. If 16M
 	 * read_bps limit is set on the root group, the whole system can't
 	 * exceed 16M for the device.
 	 *
-	 * If sane_hierarchy is not enabled, the broken flat hierarchy
+	 * If not on the default hierarchy, the broken flat hierarchy
 	 * behavior is retained where all throtl_grps are treated as if
 	 * they're all separate root groups right below throtl_data.
 	 * Limits of a group don't interact with limits of other groups
@@ -426,7 +426,7 @@ static void throtl_pd_init(struct blkcg_gq *blkg)
 	 */
 	parent_sq = &td->service_queue;
 
-	if (cgroup_sane_behavior(blkg->blkcg->css.cgroup) && blkg->parent)
+	if (cgroup_on_dfl(blkg->blkcg->css.cgroup) && blkg->parent)
 		parent_sq = &blkg_to_tg(blkg->parent)->service_queue;
 
 	throtl_service_queue_init(&tg->service_queue, parent_sq);
diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
index e0393cd2ea7f..18b282ce361e 100644
--- a/block/compat_ioctl.c
+++ b/block/compat_ioctl.c
@@ -691,6 +691,7 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
 	case BLKROSET:
 	case BLKDISCARD:
 	case BLKSECDISCARD:
+	case BLKZEROOUT:
 	/*
 	 * the ones below are implemented in blkdev_locked_ioctl,
 	 * but we call blkdev_ioctl, which gets the lock for us
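The blk-tag.c change folds the old __blk_free_tags()/blk_free_tags() pair into a single exported blk_free_tags() that drops a reference and frees the map only on the final put, so the old requirement in the removed kerneldoc (release every queue first, then call blk_free_tags() exactly once) goes away. The following stand-alone, user-space sketch illustrates that "last reference frees" pattern with C11 atomics; the names (tag_map_demo, tag_map_get, tag_map_put) are hypothetical stand-ins for the example, not the kernel definitions.

/* Hedged illustration of the refcounted release behaviour that
 * blk_free_tags() now has; only the pattern mirrors the kernel code. */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct tag_map_demo {
	atomic_int refcnt;	/* plays the role of bqt->refcnt */
	unsigned long *bits;	/* stand-in for the tag bitmap */
};

static struct tag_map_demo *tag_map_get(struct tag_map_demo *t)
{
	atomic_fetch_add(&t->refcnt, 1);	/* another queue shares the map */
	return t;
}

/* Analogue of blk_free_tags(): every holder calls this; only the
 * final put actually releases the memory. */
static void tag_map_put(struct tag_map_demo *t)
{
	if (atomic_fetch_sub(&t->refcnt, 1) == 1) {	/* we held the last ref */
		free(t->bits);
		free(t);
		puts("tag map freed on last put");
	}
}

int main(void)
{
	struct tag_map_demo *shared = calloc(1, sizeof(*shared));

	if (!shared)
		return 1;
	atomic_init(&shared->refcnt, 1);	/* creator's reference */
	shared->bits = calloc(4, sizeof(unsigned long));

	tag_map_get(shared);	/* e.g. a second request queue */
	tag_map_put(shared);	/* that queue is torn down */
	tag_map_put(shared);	/* final put frees the shared map */
	return 0;
}

With this scheme, holders can drop their references in any order; whichever put happens to be last does the cleanup.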
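The one-line compat_ioctl.c addition lets 32-bit userspace issue BLKZEROOUT on a 64-bit kernel by routing it through compat_blkdev_ioctl() like the other discard-style commands. Below is a minimal user-space sketch of the ioctl; it assumes a scratch block device you can safely wipe, and zeroes the first 1 MiB (the offset/length pair is in bytes and should be aligned to the device's logical block size).

/* Minimal BLKZEROOUT sketch.  WARNING: destroys data -- point it only
 * at a scratch device. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/fs.h>	/* BLKZEROOUT */

int main(int argc, char **argv)
{
	/* range[0] = byte offset, range[1] = byte length */
	uint64_t range[2] = { 0, 1024 * 1024 };
	int fd;

	if (argc < 2) {
		fprintf(stderr, "usage: %s <block-device>\n", argv[0]);
		return 1;
	}

	fd = open(argv[1], O_WRONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	if (ioctl(fd, BLKZEROOUT, &range) < 0) {
		perror("ioctl(BLKZEROOUT)");
		close(fd);
		return 1;
	}

	close(fd);
	return 0;
}

Built as a 32-bit binary and run on a 64-bit kernel, this request goes through compat_blkdev_ioctl(), which is exactly the path the new "case BLKZEROOUT:" line opens up.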