/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
/*
* Copyright 2018-2019 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#ifndef _EFA_COM_CMD_H_
#define _EFA_COM_CMD_H_
#include "efa_com.h"
#define EFA_GID_SIZE 16
struct efa_com_create_qp_params {
u64 rq_base_addr;
u32 send_cq_idx;
u32 recv_cq_idx;
	/*
	 * Send descriptor ring size in bytes,
	 * sufficient for the user-provided number of WQEs and SGL size
	 */
u32 sq_ring_size_in_bytes;
	/* Maximum number of WQEs that can be posted on the send queue */
u32 sq_depth;
/* Recv descriptor ring size in bytes */
u32 rq_ring_size_in_bytes;
u32 rq_depth;
u16 pd;
u16 uarn;
u8 qp_type;
};
struct efa_com_create_qp_result {
u32 qp_handle;
u32 qp_num;
u32 sq_db_offset;
u32 rq_db_offset;
u32 llq_descriptors_offset;
u16 send_sub_cq_idx;
u16 recv_sub_cq_idx;
};
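/*
 * Illustrative usage sketch (not taken from the driver source; rq_dma_addr,
 * sq_entries, rq_entries, the per-entry sizes, pdn and uarn below are
 * placeholder names/values):
 *
 *	struct efa_com_create_qp_params params = {
 *		.rq_base_addr		= rq_dma_addr,
 *		.send_cq_idx		= send_cq_idx,
 *		.recv_cq_idx		= recv_cq_idx,
 *		.sq_ring_size_in_bytes	= sq_entries * sq_entry_size,
 *		.sq_depth		= sq_entries,
 *		.rq_ring_size_in_bytes	= rq_entries * rq_entry_size,
 *		.rq_depth		= rq_entries,
 *		.pd			= pdn,
 *		.uarn			= uarn,
 *		.qp_type		= qp_type,
 *	};
 *	struct efa_com_create_qp_result result;
 *	int err;
 *
 *	err = efa_com_create_qp(edev, &params, &result);
 *
 * On success, result.qp_num identifies the QP and the *_db_offset fields
 * give the doorbell locations (presumably relative to the doorbell BAR
 * reported in efa_com_get_device_attr_result.db_bar).
 */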
struct efa_com_modify_qp_params {
u32 modify_mask;
u32 qp_handle;
u32 qp_state;
u32 cur_qp_state;
u32 qkey;
u32 sq_psn;
u8 sq_drained_async_notify;
};
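/*
 * Illustrative sketch (assumption; the actual modify_mask bit definitions
 * live elsewhere, so qp_state_mask and qkey_mask below are placeholders):
 * a caller sets modify_mask to the OR of the fields it wants applied,
 * fills those fields and leaves the rest zeroed:
 *
 *	struct efa_com_modify_qp_params params = {
 *		.qp_handle	= qp_handle,
 *		.modify_mask	= qp_state_mask | qkey_mask,
 *		.qp_state	= new_state,
 *		.qkey		= qkey,
 *	};
 *	int err = efa_com_modify_qp(edev, &params);
 */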
struct efa_com_query_qp_params {
u32 qp_handle;
};
struct efa_com_query_qp_result {
u32 qp_state;
u32 qkey;
u32 sq_draining;
u32 sq_psn;
};
struct efa_com_destroy_qp_params {
u32 qp_handle;
};
struct efa_com_create_cq_params {
/* cq physical base address in OS memory */
dma_addr_t dma_addr;
/* completion queue depth in # of entries */
u16 cq_depth;
u16 num_sub_cqs;
u16 uarn;
u8 entry_size_in_bytes;
};
struct efa_com_create_cq_result {
/* cq identifier */
u16 cq_idx;
/* actual cq depth in # of entries */
u16 actual_depth;
};
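/*
 * Illustrative usage sketch (assumption, not taken from the driver source;
 * cq_dma_addr, entries, entry_size, sub_cqs_per_cq and uarn are
 * placeholders):
 *
 *	struct efa_com_create_cq_params params = {
 *		.dma_addr		= cq_dma_addr,
 *		.cq_depth		= entries,
 *		.num_sub_cqs		= sub_cqs_per_cq,
 *		.uarn			= uarn,
 *		.entry_size_in_bytes	= entry_size,
 *	};
 *	struct efa_com_create_cq_result result;
 *	int err = efa_com_create_cq(edev, &params, &result);
 *
 * On success, result.cq_idx names the CQ for a later efa_com_destroy_cq()
 * call, and result.actual_depth may differ from the requested cq_depth.
 */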
struct efa_com_destroy_cq_params {
u16 cq_idx;
};
struct efa_com_create_ah_params {
u16 pdn;
/* Destination address in network byte order */
u8 dest_addr[EFA_GID_SIZE];
};
struct efa_com_create_ah_result {
u16 ah;
};
struct efa_com_destroy_ah_params {
u16 ah;
u16 pdn;
};
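/*
 * Illustrative sketch (assumption; 'gid' stands for a 16-byte destination
 * GID already in network byte order):
 *
 *	struct efa_com_create_ah_params params = { .pdn = pdn };
 *	struct efa_com_create_ah_result result;
 *	int err;
 *
 *	memcpy(params.dest_addr, gid, EFA_GID_SIZE);
 *	err = efa_com_create_ah(edev, &params, &result);
 *
 * The returned result.ah handle, together with the pdn it was created
 * under, is later passed via efa_com_destroy_ah_params.
 */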
struct efa_com_get_network_attr_result {
u8 addr[EFA_GID_SIZE];
u32 mtu;
};
struct efa_com_get_device_attr_result {
u64 page_size_cap;
u64 max_mr_pages;
u32 fw_version;
u32 admin_api_version;
u32 device_version;
u32 supported_features;
u32 phys_addr_width;
u32 virt_addr_width;
u32 max_qp;
u32 max_sq_depth; /* wqes */
u32 max_rq_depth; /* wqes */
u32 max_cq;
u32 max_cq_depth; /* cqes */
u32 inline_buf_size;
u32 max_mr;
u32 max_pd;
u32 max_ah;
u32 max_llq_size;
u16 sub_cqs_per_cq;
u16 max_sq_sge;
u16 max_rq_sge;
u8 db_bar;
};
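/*
 * Illustrative sketch (assumption): these limits are typically queried once
 * at init time and used to clamp caller requests, e.g.:
 *
 *	struct efa_com_get_device_attr_result attr;
 *	int err;
 *
 *	err = efa_com_get_device_attr(edev, &attr);
 *	if (err)
 *		return err;
 *	sq_depth = min_t(u32, requested_sq_depth, attr.max_sq_depth);
 *	cq_depth = min_t(u32, requested_cq_depth, attr.max_cq_depth);
 */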
struct efa_com_get_hw_hints_result {
u16 mmio_read_timeout;
u16 driver_watchdog_timeout;
u16 admin_completion_timeout;
u16 poll_interval;
u32 reserved[4];
};
struct efa_com_mem_addr {
u32 mem_addr_low;
u32 mem_addr_high;
};
/* Used for chaining page list chunks in indirect mode */
struct efa_com_ctrl_buff_info {
	/* length of the buffer pointed to by address */
u32 length;
/* points to control buffer (direct or indirect) */
struct efa_com_mem_addr address;
};
struct efa_com_reg_mr_params {
/* Memory region length, in bytes. */
u64 mr_length_in_bytes;
/* IO Virtual Address associated with this MR. */
u64 iova;
/* words 8:15: Physical Buffer List, each element is page-aligned. */
union {
/*
* Inline array of physical addresses of app pages
* (optimization for short region reservations)
*/
u64 inline_pbl_array[4];
		/*
		 * Describes the next physically contiguous chunk of the
		 * indirect page list. A page list contains the physical
		 * addresses of the command data pages. Data pages are 4KB;
		 * page list chunks are variable-sized.
		 */
struct efa_com_ctrl_buff_info pbl;
} pbl;
/* number of pages in PBL (redundant, could be calculated) */
u32 page_num;
/* Protection Domain */
u16 pd;
	/*
	 * page_shift - the page size is (1 << page_shift).
	 * The page size is used for building the virtual to physical
	 * address mapping.
	 */
u8 page_shift;
	/*
	 * Permissions bit field:
	 * bit 0: local_write_enable - write permission; a value of 1 is
	 *	needed for RQ buffers and for RDMA write
	 * bits 7:1: reserved - remote access flags, etc.
	 */
u8 permissions;
u8 inline_pbl;
u8 indirect;
};
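/*
 * Illustrative sketch (assumption, not taken from the driver; the exact
 * interplay of inline_pbl/indirect and all variable names are
 * placeholders): a short region can use the four inline PBL slots, while a
 * longer one points at an indirect page list chunk. permissions = 1 sets
 * bit 0 (local_write_enable):
 *
 *	struct efa_com_reg_mr_params params = {
 *		.mr_length_in_bytes	= length,
 *		.iova			= virt_addr,
 *		.page_num		= npages,
 *		.pd			= pdn,
 *		.page_shift		= PAGE_SHIFT,
 *		.permissions		= 1,
 *	};
 *	struct efa_com_reg_mr_result result;
 *
 *	if (npages <= ARRAY_SIZE(params.pbl.inline_pbl_array)) {
 *		memcpy(params.pbl.inline_pbl_array, page_dma_addrs,
 *		       npages * sizeof(u64));
 *		params.inline_pbl = 1;
 *	} else {
 *		params.indirect = 1;
 *		params.pbl.pbl.length = npages * sizeof(u64);
 *		efa_com_set_dma_addr(pbl_chunk_dma_addr,
 *				     &params.pbl.pbl.address.mem_addr_high,
 *				     &params.pbl.pbl.address.mem_addr_low);
 *	}
 *	err = efa_com_register_mr(edev, &params, &result);
 */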
struct efa_com_reg_mr_result {
	/*
	 * To be used in conjunction with local buffer references in SQ and
	 * RQ WQEs
	 */
u32 l_key;
	/*
	 * To be used in incoming RDMA semantics messages to refer to the
	 * remotely accessed memory region
	 */
u32 r_key;
};
struct efa_com_dereg_mr_params {
u32 l_key;
};
struct efa_com_alloc_pd_result {
u16 pdn;
};
struct efa_com_dealloc_pd_params {
u16 pdn;
};
struct efa_com_alloc_uar_result {
u16 uarn;
};
struct efa_com_dealloc_uar_params {
u16 uarn;
};
void efa_com_set_dma_addr(dma_addr_t addr, u32 *addr_high, u32 *addr_low);
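/*
 * A minimal sketch of what efa_com_set_dma_addr() is expected to do (the
 * real implementation lives in efa_com_cmd.c; this assumed version only
 * documents the high/low split convention used by struct efa_com_mem_addr):
 *
 *	void efa_com_set_dma_addr(dma_addr_t addr, u32 *addr_high,
 *				  u32 *addr_low)
 *	{
 *		*addr_low = lower_32_bits(addr);
 *		*addr_high = upper_32_bits(addr);
 *	}
 */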
int efa_com_create_qp(struct efa_com_dev *edev,
struct efa_com_create_qp_params *params,
struct efa_com_create_qp_result *res);
int efa_com_modify_qp(struct efa_com_dev *edev,
struct efa_com_modify_qp_params *params);
int efa_com_query_qp(struct efa_com_dev *edev,
struct efa_com_query_qp_params *params,
struct efa_com_query_qp_result *result);
int efa_com_destroy_qp(struct efa_com_dev *edev,
struct efa_com_destroy_qp_params *params);
int efa_com_create_cq(struct efa_com_dev *edev,
struct efa_com_create_cq_params *params,
struct efa_com_create_cq_result *result);
int efa_com_destroy_cq(struct efa_com_dev *edev,
struct efa_com_destroy_cq_params *params);
int efa_com_register_mr(struct efa_com_dev *edev,
struct efa_com_reg_mr_params *params,
struct efa_com_reg_mr_result *result);
int efa_com_dereg_mr(struct efa_com_dev *edev,
struct efa_com_dereg_mr_params *params);
int efa_com_create_ah(struct efa_com_dev *edev,
struct efa_com_create_ah_params *params,
struct efa_com_create_ah_result *result);
int efa_com_destroy_ah(struct efa_com_dev *edev,
struct efa_com_destroy_ah_params *params);
int efa_com_get_network_attr(struct efa_com_dev *edev,
struct efa_com_get_network_attr_result *result);
int efa_com_get_device_attr(struct efa_com_dev *edev,
struct efa_com_get_device_attr_result *result);
int efa_com_get_hw_hints(struct efa_com_dev *edev,
struct efa_com_get_hw_hints_result *result);
int efa_com_set_aenq_config(struct efa_com_dev *edev, u32 groups);
int efa_com_alloc_pd(struct efa_com_dev *edev,
struct efa_com_alloc_pd_result *result);
int efa_com_dealloc_pd(struct efa_com_dev *edev,
struct efa_com_dealloc_pd_params *params);
int efa_com_alloc_uar(struct efa_com_dev *edev,
struct efa_com_alloc_uar_result *result);
int efa_com_dealloc_uar(struct efa_com_dev *edev,
struct efa_com_dealloc_uar_params *params);
#endif /* _EFA_COM_CMD_H_ */