author | Andreas Gruenbacher <agruenba@redhat.com> | 2018-07-24 20:02:40 +0200
committer | Andreas Gruenbacher <agruenba@redhat.com> | 2018-07-24 20:02:40 +0200
commit | a3479c7fc096a1a7a2dccbfbdc6fcf86b805711a (patch)
tree | 85b0044b207d05b0cf5f118f2160a8de996073ce /drivers/net/ethernet/neterion/vxge/vxge-config.c
parent | 109dbb1e6f27fb8f80ee61953485c7c3b1717951 (diff)
parent | 025d0e7f73c6a9cc3ca2fe7de821792a8f3269bf (diff)
Merge branch 'iomap-write' into linux-gfs2/for-next
Pull in the gfs2 iomap-write changes: tweak the existing code to
properly support iomap writes and eliminate an unnecessary special case
in gfs2_block_map, implement iomap write support for buffered and
direct I/O, and simplify some of the existing code, removing code that
is no longer used. A sketch of how such an iomap write path is
typically wired up follows the list below:
gfs2: Remove gfs2_write_{begin,end}
gfs2: iomap direct I/O support
gfs2: gfs2_extent_length cleanup
gfs2: iomap buffered write support
gfs2: Further iomap cleanups
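For context, here is a minimal sketch of how a filesystem typically
wires buffered writes through iomap on the 4.18/4.19-era API. The
myfs_* names are hypothetical, and the locking, quota, and journaling
work a real filesystem does is omitted; this is not gfs2's actual
implementation:

#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/uio.h>

/* Hypothetical filesystem hooks; illustrative only. */
static int myfs_iomap_begin(struct inode *inode, loff_t pos, loff_t length,
			    unsigned flags, struct iomap *iomap)
{
	/* Map [pos, pos + length) to disk, allocating blocks when
	 * IOMAP_WRITE is set, and describe the extent in *iomap. */
	return 0;
}

static const struct iomap_ops myfs_iomap_ops = {
	.iomap_begin = myfs_iomap_begin,
};

static ssize_t myfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	inode_lock(inode);
	ret = generic_write_checks(iocb, from);
	if (ret > 0)
		/* iomap iterates the range and calls ->iomap_begin
		 * once per extent rather than once per block. */
		ret = iomap_file_buffered_write(iocb, from, &myfs_iomap_ops);
	inode_unlock(inode);
	return ret > 0 ? generic_write_sync(iocb, ret) : ret;
}

Direct I/O writes take the analogous iomap_dio_rw() path rather than
the buffered helper above.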
This is based on the following changes on the xfs 'iomap-4.19-merge'
branch:
iomap: add private pointer to struct iomap
iomap: add a page_done callback
iomap: generic inline data handling
iomap: complete partial direct I/O writes synchronously
iomap: mark newly allocated buffer heads as new
fs: factor out a __generic_write_end helper
Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
Diffstat (limited to 'drivers/net/ethernet/neterion/vxge/vxge-config.c')
-rw-r--r-- | drivers/net/ethernet/neterion/vxge/vxge-config.c | 21
1 file changed, 11 insertions, 10 deletions
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
index c60da9e8bf14..358ed6118881 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-config.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c
@@ -2220,22 +2220,22 @@ __vxge_hw_channel_allocate(struct __vxge_hw_vpath_handle *vph,
 	channel->length = length;
 	channel->vp_id = vp_id;
 
-	channel->work_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
+	channel->work_arr = kcalloc(length, sizeof(void *), GFP_KERNEL);
 	if (channel->work_arr == NULL)
 		goto exit1;
 
-	channel->free_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
+	channel->free_arr = kcalloc(length, sizeof(void *), GFP_KERNEL);
 	if (channel->free_arr == NULL)
 		goto exit1;
 	channel->free_ptr = length;
 
-	channel->reserve_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
+	channel->reserve_arr = kcalloc(length, sizeof(void *), GFP_KERNEL);
 	if (channel->reserve_arr == NULL)
 		goto exit1;
 	channel->reserve_ptr = length;
 	channel->reserve_top = 0;
 
-	channel->orig_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
+	channel->orig_arr = kcalloc(length, sizeof(void *), GFP_KERNEL);
 	if (channel->orig_arr == NULL)
 		goto exit1;
 
@@ -2565,7 +2565,7 @@ __vxge_hw_mempool_grow(struct vxge_hw_mempool *mempool, u32 num_allocate,
 		 * allocate new memblock and its private part at once.
 		 * This helps to minimize memory usage a lot. */
 		mempool->memblocks_priv_arr[i] =
-			vzalloc(mempool->items_priv_size * n_items);
+			vzalloc(array_size(mempool->items_priv_size, n_items));
 		if (mempool->memblocks_priv_arr[i] == NULL) {
 			status = VXGE_HW_ERR_OUT_OF_MEMORY;
 			goto exit;
@@ -2665,7 +2665,7 @@ __vxge_hw_mempool_create(struct __vxge_hw_device *devh,
 
 	/* allocate array of memblocks */
 	mempool->memblocks_arr =
-		vzalloc(sizeof(void *) * mempool->memblocks_max);
+		vzalloc(array_size(sizeof(void *), mempool->memblocks_max));
 	if (mempool->memblocks_arr == NULL) {
 		__vxge_hw_mempool_destroy(mempool);
 		status = VXGE_HW_ERR_OUT_OF_MEMORY;
@@ -2675,7 +2675,7 @@ __vxge_hw_mempool_create(struct __vxge_hw_device *devh,
 
 	/* allocate array of private parts of items per memblocks */
 	mempool->memblocks_priv_arr =
-		vzalloc(sizeof(void *) * mempool->memblocks_max);
+		vzalloc(array_size(sizeof(void *), mempool->memblocks_max));
 	if (mempool->memblocks_priv_arr == NULL) {
 		__vxge_hw_mempool_destroy(mempool);
 		status = VXGE_HW_ERR_OUT_OF_MEMORY;
@@ -2685,8 +2685,8 @@ __vxge_hw_mempool_create(struct __vxge_hw_device *devh,
 
 	/* allocate array of memblocks DMA objects */
 	mempool->memblocks_dma_arr =
-		vzalloc(sizeof(struct vxge_hw_mempool_dma) *
-			mempool->memblocks_max);
+		vzalloc(array_size(sizeof(struct vxge_hw_mempool_dma),
+				   mempool->memblocks_max));
 	if (mempool->memblocks_dma_arr == NULL) {
 		__vxge_hw_mempool_destroy(mempool);
 		status = VXGE_HW_ERR_OUT_OF_MEMORY;
@@ -2695,7 +2695,8 @@ __vxge_hw_mempool_create(struct __vxge_hw_device *devh,
 	}
 
 	/* allocate hash array of items */
-	mempool->items_arr = vzalloc(sizeof(void *) * mempool->items_max);
+	mempool->items_arr = vzalloc(array_size(sizeof(void *),
+						mempool->items_max));
 	if (mempool->items_arr == NULL) {
 		__vxge_hw_mempool_destroy(mempool);
 		status = VXGE_HW_ERR_OUT_OF_MEMORY;
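The changes themselves follow a treewide hardening pattern: an
open-coded size calculation such as sizeof(void *) * length can wrap
around, in which case the allocator silently returns a buffer smaller
than intended. kcalloc() performs the multiplication with an overflow
check and returns NULL on overflow, and array_size() from
include/linux/overflow.h saturates to SIZE_MAX so that the following
vzalloc() fails cleanly. Below is a minimal userspace model of
array_size() built on the GCC/Clang __builtin_mul_overflow intrinsic;
it mimics the kernel helper's behaviour but is not the kernel's actual
code:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Userspace model of array_size(): a * b, saturating to SIZE_MAX on
 * overflow so a subsequent allocation fails instead of succeeding
 * with a truncated size. */
static size_t model_array_size(size_t a, size_t b)
{
	size_t bytes;

	if (__builtin_mul_overflow(a, b, &bytes))
		return SIZE_MAX;
	return bytes;
}

int main(void)
{
	/* On a 64-bit system, n * sizeof(void *) wraps to 16 bytes, so
	 * a naive malloc(n * sizeof(void *)) would "succeed" with a
	 * tiny buffer for a huge requested array. */
	size_t n = SIZE_MAX / sizeof(void *) + 3;
	void *p;

	printf("unchecked: %zu bytes\n", n * sizeof(void *));
	printf("checked:   %zu bytes\n", model_array_size(n, sizeof(void *)));

	p = malloc(model_array_size(n, sizeof(void *)));
	printf("malloc:    %s\n", p ? "succeeded (bad)" : "failed (good)");
	free(p);
	return 0;
}

kcalloc(n, size, flags) bundles the same overflow check into the
allocator itself, which is why the kzalloc(sizeof(void *)*length, ...)
calls above become kcalloc(length, sizeof(void *), ...) rather than
keeping the explicit multiplication.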