author     Christoph Hellwig <hch@lst.de>    2022-11-09 08:15:00 +0100
committer  Christoph Hellwig <hch@lst.de>    2022-11-21 09:35:52 +0100
commit     82c310c33ace7d25c0475e49a6051727c48a8cc6 (patch)
tree       41707c5d544c5751c9e54eda67c64be6956a74fc /drivers/infiniband/hw
parent     b3dc3f8e49577840dc8ac8a365c5b3da4edb10b8 (diff)
RDMA/hfi1: don't pass bogus GFP_ flags to dma_alloc_coherent
dma_alloc_coherent is an opaque allocator that only uses the GFP_ flags for
allocation context control.  Don't pass GFP_USER, which doesn't make sense for
a kernel DMA allocation, or __GFP_COMP, which makes no sense for an allocation
that can't in any way be converted to a page pointer.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Jason Gunthorpe <jgg@nvidia.com>
Acked-by: Dean Luick <dean.luick@cornelisnetworks.com>
Tested-by: Dean Luick <dean.luick@cornelisnetworks.com>
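For reference, a minimal sketch (not part of this patch) of the calling
convention the commit message describes; the helper name, device pointer and
size argument are placeholders, not hfi1 code:

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

/*
 * dma_alloc_coherent() returns a kernel mapping of an opaque DMA buffer,
 * so only allocation-context flags are meaningful.  GFP_KERNEL is the
 * normal choice when sleeping is allowed (GFP_ATOMIC from atomic context).
 * Flags such as GFP_USER or __GFP_COMP do not apply, since the returned
 * buffer cannot be converted to a struct page.
 */
static void *example_alloc_dma_buffer(struct device *dev, size_t size,
                                       dma_addr_t *dma_handle)
{
        return dma_alloc_coherent(dev, size, dma_handle, GFP_KERNEL);
}

Such a buffer would later be released with dma_free_coherent(dev, size,
cpu_addr, dma_handle).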
Diffstat (limited to 'drivers/infiniband/hw')
-rw-r--r--  drivers/infiniband/hw/hfi1/init.c | 21
1 file changed, 3 insertions(+), 18 deletions(-)
diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
index 436372b31431..24c0f0d257fc 100644
--- a/drivers/infiniband/hw/hfi1/init.c
+++ b/drivers/infiniband/hw/hfi1/init.c
@@ -1761,17 +1761,11 @@ int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
         unsigned amt;
 
         if (!rcd->rcvhdrq) {
-                gfp_t gfp_flags;
-
                 amt = rcvhdrq_size(rcd);
 
-                if (rcd->ctxt < dd->first_dyn_alloc_ctxt || rcd->is_vnic)
-                        gfp_flags = GFP_KERNEL;
-                else
-                        gfp_flags = GFP_USER;
                 rcd->rcvhdrq = dma_alloc_coherent(&dd->pcidev->dev, amt,
                                                   &rcd->rcvhdrq_dma,
-                                                  gfp_flags | __GFP_COMP);
+                                                  GFP_KERNEL);
 
                 if (!rcd->rcvhdrq) {
                         dd_dev_err(dd,
@@ -1785,7 +1779,7 @@ int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
                 rcd->rcvhdrtail_kvaddr = dma_alloc_coherent(&dd->pcidev->dev,
                                                             PAGE_SIZE,
                                                             &rcd->rcvhdrqtailaddr_dma,
-                                                            gfp_flags);
+                                                            GFP_KERNEL);
                 if (!rcd->rcvhdrtail_kvaddr)
                         goto bail_free;
         }
@@ -1821,20 +1815,11 @@ int hfi1_setup_eagerbufs(struct hfi1_ctxtdata *rcd)
 {
         struct hfi1_devdata *dd = rcd->dd;
         u32 max_entries, egrtop, alloced_bytes = 0;
-        gfp_t gfp_flags;
         u16 order, idx = 0;
         int ret = 0;
         u16 round_mtu = roundup_pow_of_two(hfi1_max_mtu);
 
         /*
-         * GFP_USER, but without GFP_FS, so buffer cache can be
-         * coalesced (we hope); otherwise, even at order 4,
-         * heavy filesystem activity makes these fail, and we can
-         * use compound pages.
-         */
-        gfp_flags = __GFP_RECLAIM | __GFP_IO | __GFP_COMP;
-
-        /*
          * The minimum size of the eager buffers is a groups of MTU-sized
          * buffers.
          * The global eager_buffer_size parameter is checked against the
@@ -1864,7 +1849,7 @@ int hfi1_setup_eagerbufs(struct hfi1_ctxtdata *rcd)
                         dma_alloc_coherent(&dd->pcidev->dev,
                                            rcd->egrbufs.rcvtid_size,
                                            &rcd->egrbufs.buffers[idx].dma,
-                                           gfp_flags);
+                                           GFP_KERNEL);
                 if (rcd->egrbufs.buffers[idx].addr) {
                         rcd->egrbufs.buffers[idx].len =
                                 rcd->egrbufs.rcvtid_size;