path: root/drivers/infiniband/hw/mana/mr.c
blob: 4f13423ecdbdf1af6a55f2e7372f2925e56234e1
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2022, Microsoft Corporation. All rights reserved.
 */

#include "mana_ib.h"

#define VALID_MR_FLAGS                                                         \
	(IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ)

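/*
 * Map IB verbs access flags onto GDMA access flags. Local read access is
 * always granted, as it is implicit for an IB memory region.
 */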
static enum gdma_mr_access_flags
mana_ib_verbs_to_gdma_access_flags(int access_flags)
{
	enum gdma_mr_access_flags flags = GDMA_ACCESS_FLAG_LOCAL_READ;

	if (access_flags & IB_ACCESS_LOCAL_WRITE)
		flags |= GDMA_ACCESS_FLAG_LOCAL_WRITE;

	if (access_flags & IB_ACCESS_REMOTE_WRITE)
		flags |= GDMA_ACCESS_FLAG_REMOTE_WRITE;

	if (access_flags & IB_ACCESS_REMOTE_READ)
		flags |= GDMA_ACCESS_FLAG_REMOTE_READ;

	return flags;
}

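/*
 * Send a GDMA_CREATE_MR request to the device. On success, the returned
 * lkey, rkey and MR handle are stored in @mr.
 */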
static int mana_ib_gd_create_mr(struct mana_ib_dev *dev, struct mana_ib_mr *mr,
				struct gdma_create_mr_params *mr_params)
{
	struct gdma_create_mr_response resp = {};
	struct gdma_create_mr_request req = {};
	struct gdma_context *gc = mdev_to_gc(dev);
	int err;

	mana_gd_init_req_hdr(&req.hdr, GDMA_CREATE_MR, sizeof(req),
			     sizeof(resp));
	req.pd_handle = mr_params->pd_handle;
	req.mr_type = mr_params->mr_type;

	switch (mr_params->mr_type) {
	case GDMA_MR_TYPE_GVA:
		req.gva.dma_region_handle = mr_params->gva.dma_region_handle;
		req.gva.virtual_address = mr_params->gva.virtual_address;
		req.gva.access_flags = mr_params->gva.access_flags;
		break;

	default:
		ibdev_dbg(&dev->ib_dev,
			  "invalid param (GDMA_MR_TYPE) passed, type %d\n",
			  req.mr_type);
		return -EINVAL;
	}

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);

	if (err || resp.hdr.status) {
		ibdev_dbg(&dev->ib_dev, "Failed to create mr %d, %u\n", err,
			  resp.hdr.status);
		if (!err)
			err = -EPROTO;

		return err;
	}

	mr->ibmr.lkey = resp.lkey;
	mr->ibmr.rkey = resp.rkey;
	mr->mr_handle = resp.mr_handle;

	return 0;
}

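/* Send a GDMA_DESTROY_MR request to the device for @mr_handle. */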
static int mana_ib_gd_destroy_mr(struct mana_ib_dev *dev, u64 mr_handle)
{
	struct gdma_destroy_mr_response resp = {};
	struct gdma_destroy_mr_request req = {};
	struct gdma_context *gc = mdev_to_gc(dev);
	int err;

	mana_gd_init_req_hdr(&req.hdr, GDMA_DESTROY_MR, sizeof(req),
			     sizeof(resp));

	req.mr_handle = mr_handle;

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err || resp.hdr.status) {
		dev_err(gc->dev, "Failed to destroy MR: %d, 0x%x\n", err,
			resp.hdr.status);
		if (!err)
			err = -EPROTO;
		return err;
	}

	return 0;
}

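/*
 * Register a user-space memory region: pin the user pages with
 * ib_umem_get(), create a GDMA DMA region over them, then create a
 * hardware MR that references that region.
 */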
struct ib_mr *mana_ib_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 length,
				  u64 iova, int access_flags,
				  struct ib_udata *udata)
{
	struct mana_ib_pd *pd = container_of(ibpd, struct mana_ib_pd, ibpd);
	struct gdma_create_mr_params mr_params = {};
	struct ib_device *ibdev = ibpd->device;
	struct mana_ib_dev *dev;
	struct mana_ib_mr *mr;
	u64 dma_region_handle;
	int err;

	dev = container_of(ibdev, struct mana_ib_dev, ib_dev);

	ibdev_dbg(ibdev,
		  "start 0x%llx, iova 0x%llx, length 0x%llx, access_flags 0x%x\n",
		  start, iova, length, access_flags);

	if (access_flags & ~VALID_MR_FLAGS)
		return ERR_PTR(-EINVAL);

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

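	/* Pin the user memory and build its scatter/gather list */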
	mr->umem = ib_umem_get(ibdev, start, length, access_flags);
	if (IS_ERR(mr->umem)) {
		err = PTR_ERR(mr->umem);
		ibdev_dbg(ibdev,
			  "Failed to get umem to register user-mr, %d\n", err);
		goto err_free;
	}

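	/* Create a GDMA DMA region backed by the pinned user pages */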
	err = mana_ib_create_dma_region(dev, mr->umem, &dma_region_handle, iova);
	if (err) {
		ibdev_dbg(ibdev, "Failed create dma region for user-mr, %d\n",
			  err);
		goto err_umem;
	}

	ibdev_dbg(ibdev,
		  "created dma region for user-mr 0x%llx\n",
		  dma_region_handle);

	mr_params.pd_handle = pd->pd_handle;
	mr_params.mr_type = GDMA_MR_TYPE_GVA;
	mr_params.gva.dma_region_handle = dma_region_handle;
	mr_params.gva.virtual_address = iova;
	mr_params.gva.access_flags =
		mana_ib_verbs_to_gdma_access_flags(access_flags);

	err = mana_ib_gd_create_mr(dev, mr, &mr_params);
	if (err)
		goto err_dma_region;

	/*
	 * There is no need to keep track of dma_region_handle after the MR is
	 * successfully created. The dma_region_handle is tracked in the PF
	 * as part of the lifecycle of this MR.
	 */

	return &mr->ibmr;

err_dma_region:
	mana_gd_destroy_dma_region(mdev_to_gc(dev), dma_region_handle);

err_umem:
	ib_umem_release(mr->umem);

err_free:
	kfree(mr);
	return ERR_PTR(err);
}

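/* Tear down the hardware MR, then release the pinned user memory and free @ibmr. */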
int mana_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
{
	struct mana_ib_mr *mr = container_of(ibmr, struct mana_ib_mr, ibmr);
	struct ib_device *ibdev = ibmr->device;
	struct mana_ib_dev *dev;
	int err;

	dev = container_of(ibdev, struct mana_ib_dev, ib_dev);

	err = mana_ib_gd_destroy_mr(dev, mr->mr_handle);
	if (err)
		return err;

	if (mr->umem)
		ib_umem_release(mr->umem);

	kfree(mr);

	return 0;
}