1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
|
/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
/*
* Copyright 2018-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#ifndef _EFA_ADMIN_CMDS_H_
#define _EFA_ADMIN_CMDS_H_
#define EFA_ADMIN_API_VERSION_MAJOR 0
#define EFA_ADMIN_API_VERSION_MINOR 1
/* EFA admin queue (AQ) command opcodes, placed in the common AQ descriptor */
enum efa_admin_aq_opcode {
	EFA_ADMIN_CREATE_QP = 1,
	EFA_ADMIN_MODIFY_QP = 2,
	EFA_ADMIN_QUERY_QP = 3,
	EFA_ADMIN_DESTROY_QP = 4,
	EFA_ADMIN_CREATE_AH = 5,
	EFA_ADMIN_DESTROY_AH = 6,
	EFA_ADMIN_REG_MR = 7,
	EFA_ADMIN_DEREG_MR = 8,
	EFA_ADMIN_CREATE_CQ = 9,
	EFA_ADMIN_DESTROY_CQ = 10,
	EFA_ADMIN_GET_FEATURE = 11,
	EFA_ADMIN_SET_FEATURE = 12,
	EFA_ADMIN_GET_STATS = 13,
	EFA_ADMIN_ALLOC_PD = 14,
	EFA_ADMIN_DEALLOC_PD = 15,
	EFA_ADMIN_ALLOC_UAR = 16,
	EFA_ADMIN_DEALLOC_UAR = 17,
	/* Must be kept equal to the highest opcode above */
	EFA_ADMIN_MAX_OPCODE = 17,
};
/* Feature identifiers used by the GET_FEATURE/SET_FEATURE commands */
enum efa_admin_aq_feature_id {
	EFA_ADMIN_DEVICE_ATTR = 1,
	EFA_ADMIN_AENQ_CONFIG = 2,
	EFA_ADMIN_NETWORK_ATTR = 3,
	EFA_ADMIN_QUEUE_ATTR = 4,
	EFA_ADMIN_HW_HINTS = 5,
	/* Size of the feature-id space; values 6-7 are currently unassigned */
	EFA_ADMIN_FEATURES_OPCODE_NUM = 8,
};
/* QP transport type, carried in efa_admin_create_qp_cmd.qp_type */
enum efa_admin_qp_type {
	/* Unreliable Datagram */
	EFA_ADMIN_QP_TYPE_UD = 1,
	/* Scalable Reliable Datagram */
	EFA_ADMIN_QP_TYPE_SRD = 2,
};
/*
 * QP state encoding, used by the modify/query QP commands. Names follow
 * the usual verbs QP state machine (RTR = Ready To Receive, RTS = Ready
 * To Send, SQD = SQ Drained, SQE = SQ Error).
 */
enum efa_admin_qp_state {
	EFA_ADMIN_QP_STATE_RESET = 0,
	EFA_ADMIN_QP_STATE_INIT = 1,
	EFA_ADMIN_QP_STATE_RTR = 2,
	EFA_ADMIN_QP_STATE_RTS = 3,
	EFA_ADMIN_QP_STATE_SQD = 4,
	EFA_ADMIN_QP_STATE_SQE = 5,
	EFA_ADMIN_QP_STATE_ERR = 6,
};
/* Statistics report type for the GET_STATS command */
enum efa_admin_get_stats_type {
	EFA_ADMIN_GET_STATS_TYPE_BASIC = 0,
};
/* Scope of a GET_STATS request */
enum efa_admin_get_stats_scope {
	/* Device-wide statistics */
	EFA_ADMIN_GET_STATS_SCOPE_ALL = 0,
	/* Per-queue statistics; queue presumably selected via scope_modifier */
	EFA_ADMIN_GET_STATS_SCOPE_QUEUE = 1,
};
/* Bit numbers for efa_admin_modify_qp_cmd.modify_mask */
enum efa_admin_modify_qp_mask_bits {
	EFA_ADMIN_QP_STATE_BIT = 0,
	EFA_ADMIN_CUR_QP_STATE_BIT = 1,
	EFA_ADMIN_QKEY_BIT = 2,
	EFA_ADMIN_SQ_PSN_BIT = 3,
	EFA_ADMIN_SQ_DRAINED_ASYNC_NOTIFY_BIT = 4,
};
/*
 * QP allocation sizes, converted by fabric QueuePair (QP) create command
 * from QP capabilities. Embedded in efa_admin_create_qp_cmd.
 */
struct efa_admin_qp_alloc_size {
	/* Send descriptor ring size in bytes */
	u32 send_queue_ring_size;
	/* Max number of WQEs that can be outstanding on send queue. */
	u32 send_queue_depth;
	/*
	 * Recv descriptor ring size in bytes, sufficient for user-provided
	 * number of WQEs
	 */
	u32 recv_queue_ring_size;
	/* Max number of WQEs that can be outstanding on recv queue */
	u32 recv_queue_depth;
};
/* Create QP command (opcode EFA_ADMIN_CREATE_QP) */
struct efa_admin_create_qp_cmd {
	/* Common Admin Queue descriptor */
	struct efa_admin_aq_common_desc aq_common_desc;
	/* Protection Domain associated with this QP */
	u16 pd;
	/* QP type, see enum efa_admin_qp_type */
	u8 qp_type;
	/*
	 * 0 : sq_virt - If set, SQ ring base address is
	 * virtual (IOVA returned by MR registration)
	 * 1 : rq_virt - If set, RQ ring base address is
	 * virtual (IOVA returned by MR registration)
	 * 7:2 : reserved - MBZ
	 */
	u8 flags;
	/*
	 * Send queue (SQ) ring base physical address. This field is not
	 * used if this is a Low Latency Queue(LLQ).
	 */
	u64 sq_base_addr;
	/* Receive queue (RQ) ring base address. */
	u64 rq_base_addr;
	/* Index of CQ to be associated with Send Queue completions */
	u32 send_cq_idx;
	/* Index of CQ to be associated with Recv Queue completions */
	u32 recv_cq_idx;
	/*
	 * Memory registration key for the SQ ring, used only when not in
	 * LLQ mode and base address is virtual
	 */
	u32 sq_l_key;
	/*
	 * Memory registration key for the RQ ring, used only when base
	 * address is virtual
	 */
	u32 rq_l_key;
	/* Requested QP allocation sizes */
	struct efa_admin_qp_alloc_size qp_alloc_size;
	/* UAR number */
	u16 uar;
	/* MBZ */
	u16 reserved;
	/* MBZ */
	u32 reserved2;
};
/* Create QP response */
struct efa_admin_create_qp_resp {
	/* Common Admin Queue completion descriptor */
	struct efa_admin_acq_common_desc acq_common_desc;
	/*
	 * Opaque handle to be used for consequent admin operations on the
	 * QP
	 */
	u32 qp_handle;
	/*
	 * QP number in the given EFA virtual device. Least-significant bits
	 * (as needed according to max_qp) carry unique QP ID
	 */
	u16 qp_num;
	/* MBZ */
	u16 reserved;
	/* Index of sub-CQ for Send Queue completions */
	u16 send_sub_cq_idx;
	/* Index of sub-CQ for Receive Queue completions */
	u16 recv_sub_cq_idx;
	/* SQ doorbell address, as offset to PCIe DB BAR */
	u32 sq_db_offset;
	/* RQ doorbell address, as offset to PCIe DB BAR */
	u32 rq_db_offset;
	/*
	 * low latency send queue ring base address as an offset to PCIe
	 * MMIO LLQ_MEM BAR
	 */
	u32 llq_descriptors_offset;
};
/* Modify QP command (opcode EFA_ADMIN_MODIFY_QP) */
struct efa_admin_modify_qp_cmd {
	/* Common Admin Queue descriptor */
	struct efa_admin_aq_common_desc aq_common_desc;
	/*
	 * Mask indicating which fields should be updated see enum
	 * efa_admin_modify_qp_mask_bits
	 */
	u32 modify_mask;
	/* QP handle returned by create_qp command */
	u32 qp_handle;
	/* QP state, see enum efa_admin_qp_state */
	u32 qp_state;
	/* Override current QP state (before applying the transition) */
	u32 cur_qp_state;
	/* QKey */
	u32 qkey;
	/* SQ PSN */
	u32 sq_psn;
	/* Enable async notification when SQ is drained */
	u8 sq_drained_async_notify;
	/* MBZ */
	u8 reserved1;
	/* MBZ */
	u16 reserved2;
};
/* Modify QP response */
struct efa_admin_modify_qp_resp {
	/* Common Admin Queue completion descriptor */
	struct efa_admin_acq_common_desc acq_common_desc;
};
/* Query QP command (opcode EFA_ADMIN_QUERY_QP) */
struct efa_admin_query_qp_cmd {
	/* Common Admin Queue descriptor */
	struct efa_admin_aq_common_desc aq_common_desc;
	/* QP handle returned by create_qp command */
	u32 qp_handle;
};
/* Query QP response */
struct efa_admin_query_qp_resp {
	/* Common Admin Queue completion descriptor */
	struct efa_admin_acq_common_desc acq_common_desc;
	/* QP state, see enum efa_admin_qp_state */
	u32 qp_state;
	/* QKey */
	u32 qkey;
	/* SQ PSN */
	u32 sq_psn;
	/* Indicates that draining is in progress */
	u8 sq_draining;
	/* MBZ */
	u8 reserved1;
	/* MBZ */
	u16 reserved2;
};
/* Destroy QP command (opcode EFA_ADMIN_DESTROY_QP) */
struct efa_admin_destroy_qp_cmd {
	/* Common Admin Queue descriptor */
	struct efa_admin_aq_common_desc aq_common_desc;
	/* QP handle returned by create_qp command */
	u32 qp_handle;
};
/* Destroy QP response */
struct efa_admin_destroy_qp_resp {
	/* Common Admin Queue completion descriptor */
	struct efa_admin_acq_common_desc acq_common_desc;
};
/*
 * Create Address Handle command parameters (opcode EFA_ADMIN_CREATE_AH).
 * Must not be called more than once for the same destination
 */
struct efa_admin_create_ah_cmd {
	/* Common Admin Queue descriptor */
	struct efa_admin_aq_common_desc aq_common_desc;
	/* Destination address in network byte order */
	u8 dest_addr[16];
	/* PD number */
	u16 pd;
	/* MBZ */
	u16 reserved;
};
/* Create Address Handle response */
struct efa_admin_create_ah_resp {
	/* Common Admin Queue completion descriptor */
	struct efa_admin_acq_common_desc acq_common_desc;
	/* Target interface address handle (opaque) */
	u16 ah;
	/* MBZ */
	u16 reserved;
};
/* Destroy Address Handle command (opcode EFA_ADMIN_DESTROY_AH) */
struct efa_admin_destroy_ah_cmd {
	/* Common Admin Queue descriptor */
	struct efa_admin_aq_common_desc aq_common_desc;
	/* Target interface address handle (opaque) */
	u16 ah;
	/* PD number */
	u16 pd;
};
/* Destroy Address Handle response */
struct efa_admin_destroy_ah_resp {
	/* Common Admin Queue completion descriptor */
	struct efa_admin_acq_common_desc acq_common_desc;
};
/*
 * Registration of MemoryRegion, required for QP working with Virtual
 * Addresses (opcode EFA_ADMIN_REG_MR). In standard verbs semantics,
 * region length is limited to 2GB space, but EFA offers larger MR
 * support for large memory space, to ease on users working with very
 * large datasets (i.e. full GPU memory mapping).
 */
struct efa_admin_reg_mr_cmd {
	/* Common Admin Queue descriptor */
	struct efa_admin_aq_common_desc aq_common_desc;
	/* Protection Domain */
	u16 pd;
	/* MBZ */
	u16 reserved16_w1;
	/* Physical Buffer List, each element is page-aligned. */
	union {
		/*
		 * Inline array of guest-physical page addresses of user
		 * memory pages (optimization for short region
		 * registrations)
		 */
		u64 inline_pbl_array[4];
		/* points to PBL (direct or indirect, chained if needed) */
		struct efa_admin_ctrl_buff_info pbl;
	} pbl;
	/* Memory region length, in bytes. */
	u64 mr_length;
	/*
	 * flags and page size
	 * 4:0 : phys_page_size_shift - page size is (1 <<
	 * phys_page_size_shift). Page size is used for
	 * building the Virtual to Physical address mapping
	 * 6:5 : reserved - MBZ
	 * 7 : mem_addr_phy_mode_en - Enable bit for physical
	 * memory registration (no translation), can be used
	 * only by privileged clients. If set, PBL must
	 * contain a single entry.
	 */
	u8 flags;
	/*
	 * permissions
	 * 0 : local_write_enable - Local write permissions:
	 * must be set for RQ buffers and buffers posted for
	 * RDMA Read requests
	 * 1 : reserved1 - MBZ
	 * 2 : remote_read_enable - Remote read permissions:
	 * must be set to enable RDMA read from the region
	 * 7:3 : reserved2 - MBZ
	 */
	u8 permissions;
	/* MBZ */
	u16 reserved16_w5;
	/* number of pages in PBL (redundant, could be calculated) */
	u32 page_num;
	/*
	 * IO Virtual Address associated with this MR. If
	 * mem_addr_phy_mode_en is set, contains the physical address of
	 * the region.
	 */
	u64 iova;
};
/* Register MR response */
struct efa_admin_reg_mr_resp {
	/* Common Admin Queue completion descriptor */
	struct efa_admin_acq_common_desc acq_common_desc;
	/*
	 * L_Key, to be used in conjunction with local buffer references in
	 * SQ and RQ WQE, or with virtual RQ/CQ rings
	 */
	u32 l_key;
	/*
	 * R_Key, to be used in RDMA messages to refer to remotely accessed
	 * memory region
	 */
	u32 r_key;
};
/* Deregister MR command (opcode EFA_ADMIN_DEREG_MR) */
struct efa_admin_dereg_mr_cmd {
	/* Common Admin Queue descriptor */
	struct efa_admin_aq_common_desc aq_common_desc;
	/* L_Key, memory region's l_key */
	u32 l_key;
};
/* Deregister MR response */
struct efa_admin_dereg_mr_resp {
	/* Common Admin Queue completion descriptor */
	struct efa_admin_acq_common_desc acq_common_desc;
};
/* Create CQ command (opcode EFA_ADMIN_CREATE_CQ) */
struct efa_admin_create_cq_cmd {
	/* Common Admin Queue descriptor */
	struct efa_admin_aq_common_desc aq_common_desc;
	/*
	 * 4:0 : reserved5 - MBZ
	 * 5 : interrupt_mode_enabled - if set, cq operates
	 * in interrupt mode (i.e. CQ events and MSI-X are
	 * generated), otherwise - polling
	 * 6 : virt - If set, ring base address is virtual
	 * (IOVA returned by MR registration)
	 * 7 : reserved6 - MBZ
	 */
	u8 cq_caps_1;
	/*
	 * 4:0 : cq_entry_size_words - size of CQ entry in
	 * 32-bit words, valid values: 4, 8.
	 * 7:5 : reserved7 - MBZ
	 */
	u8 cq_caps_2;
	/* completion queue depth in # of entries. must be power of 2 */
	u16 cq_depth;
	/* msix vector assigned to this cq */
	u32 msix_vector_idx;
	/*
	 * CQ ring base address, virtual or physical depending on 'virt'
	 * flag
	 */
	struct efa_common_mem_addr cq_ba;
	/*
	 * Memory registration key for the ring, used only when base
	 * address is virtual
	 */
	u32 l_key;
	/*
	 * number of sub cqs - must be equal to sub_cqs_per_cq of queue
	 * attributes.
	 */
	u16 num_sub_cqs;
	/* UAR number */
	u16 uar;
};
/* Create CQ response */
struct efa_admin_create_cq_resp {
	/* Common Admin Queue completion descriptor */
	struct efa_admin_acq_common_desc acq_common_desc;
	/* Index of the created CQ */
	u16 cq_idx;
	/* actual cq depth in number of entries */
	u16 cq_actual_depth;
};
/* Destroy CQ command (opcode EFA_ADMIN_DESTROY_CQ) */
struct efa_admin_destroy_cq_cmd {
	/* Common Admin Queue descriptor */
	struct efa_admin_aq_common_desc aq_common_desc;
	/* CQ index returned by create_cq command */
	u16 cq_idx;
	/* MBZ */
	u16 reserved1;
};
/* Destroy CQ response */
struct efa_admin_destroy_cq_resp {
	/* Common Admin Queue completion descriptor */
	struct efa_admin_acq_common_desc acq_common_desc;
};
/*
 * EFA AQ Get Statistics command (opcode EFA_ADMIN_GET_STATS). Extended
 * statistics are placed in control buffer pointed by AQ entry
 */
struct efa_admin_aq_get_stats_cmd {
	/* Common Admin Queue descriptor */
	struct efa_admin_aq_common_desc aq_common_descriptor;
	union {
		/* command specific inline data */
		u32 inline_data_w1[3];
		/* buffer for extended statistics */
		struct efa_admin_ctrl_buff_info control_buffer;
	} u;
	/* stats type as defined in enum efa_admin_get_stats_type */
	u8 type;
	/* stats scope defined in enum efa_admin_get_stats_scope */
	u8 scope;
	/* scope-specific qualifier (presumably a queue selector for QUEUE scope) */
	u16 scope_modifier;
};
/* Basic statistics report, returned by GET_STATS with type BASIC */
struct efa_admin_basic_stats {
	/* Transmitted bytes */
	u64 tx_bytes;
	/* Transmitted packets */
	u64 tx_pkts;
	/* Received bytes */
	u64 rx_bytes;
	/* Received packets */
	u64 rx_pkts;
	/* Dropped receive packets */
	u64 rx_drops;
};
/* Get Statistics response */
struct efa_admin_acq_get_stats_resp {
	/* Common Admin Queue completion descriptor */
	struct efa_admin_acq_common_desc acq_common_desc;
	/* Inline basic statistics report */
	struct efa_admin_basic_stats basic_stats;
};
/* Descriptor shared by the GET_FEATURE and SET_FEATURE commands */
struct efa_admin_get_set_feature_common_desc {
	/*
	 * 1:0 : select - 0x1 - current value; 0x3 - default
	 * value
	 * 7:3 : reserved3 - MBZ
	 */
	u8 flags;
	/* as appears in efa_admin_aq_feature_id */
	u8 feature_id;
	/* MBZ */
	u16 reserved16;
};
/* Device attributes, returned for feature EFA_ADMIN_DEVICE_ATTR */
struct efa_admin_feature_device_attr_desc {
	/* Bitmap of efa_admin_aq_feature_id */
	u64 supported_features;
	/* Bitmap of supported page sizes in MR registrations */
	u64 page_size_cap;
	/* Firmware version */
	u32 fw_version;
	/* Admin API version supported by the device */
	u32 admin_api_version;
	/* Device version */
	u32 device_version;
	/* Bar used for SQ and RQ doorbells */
	u16 db_bar;
	/* Indicates how many bits are used on physical address access */
	u8 phys_addr_width;
	/* Indicates how many bits are used on virtual address access */
	u8 virt_addr_width;
	/*
	 * 0 : rdma_read - If set, RDMA Read is supported on
	 * TX queues
	 * 31:1 : reserved - MBZ
	 */
	u32 device_caps;
	/* Max RDMA transfer size in bytes */
	u32 max_rdma_size;
};
/* Queue attributes, returned for feature EFA_ADMIN_QUEUE_ATTR */
struct efa_admin_feature_queue_attr_desc {
	/* The maximum number of queue pairs supported */
	u32 max_qp;
	/* Maximum number of WQEs per Send Queue */
	u32 max_sq_depth;
	/* Maximum size of data that can be sent inline in a Send WQE */
	u32 inline_buf_size;
	/* Maximum number of buffer descriptors per Recv Queue */
	u32 max_rq_depth;
	/* The maximum number of completion queues supported per VF */
	u32 max_cq;
	/* Maximum number of CQEs per Completion Queue */
	u32 max_cq_depth;
	/* Number of sub-CQs to be created for each CQ */
	u16 sub_cqs_per_cq;
	/* MBZ */
	u16 reserved;
	/* Maximum number of SGEs (buffers) allowed for a single send WQE */
	u16 max_wr_send_sges;
	/* Maximum number of SGEs allowed for a single recv WQE */
	u16 max_wr_recv_sges;
	/* The maximum number of memory regions supported */
	u32 max_mr;
	/* The maximum number of pages can be registered */
	u32 max_mr_pages;
	/* The maximum number of protection domains supported */
	u32 max_pd;
	/* The maximum number of address handles supported */
	u32 max_ah;
	/* The maximum size of LLQ in bytes */
	u32 max_llq_size;
	/* Maximum number of SGEs for a single RDMA read WQE */
	u16 max_wr_rdma_sges;
};
/* AENQ configuration, used with feature EFA_ADMIN_AENQ_CONFIG */
struct efa_admin_feature_aenq_desc {
	/* bitmask for AENQ groups the device can report */
	u32 supported_groups;
	/* bitmask for AENQ groups to report */
	u32 enabled_groups;
};
/* Network attributes, returned for feature EFA_ADMIN_NETWORK_ATTR */
struct efa_admin_feature_network_attr_desc {
	/* Raw address data in network byte order */
	u8 addr[16];
	/* max packet payload size in bytes */
	u32 mtu;
};
/*
 * Device timing hints, returned for feature EFA_ADMIN_HW_HINTS. When
 * hint value is 0, hints capabilities are not supported or driver
 * should use its own predefined value
 */
struct efa_admin_hw_hints {
	/* value in ms */
	u16 mmio_read_timeout;
	/* value in ms */
	u16 driver_watchdog_timeout;
	/* value in ms */
	u16 admin_completion_timeout;
	/* poll interval in ms */
	u16 poll_interval;
};
/* Get Feature command (opcode EFA_ADMIN_GET_FEATURE) */
struct efa_admin_get_feature_cmd {
	/* Common Admin Queue descriptor */
	struct efa_admin_aq_common_desc aq_common_descriptor;
	/* Control buffer for out-of-line feature data — TODO confirm usage */
	struct efa_admin_ctrl_buff_info control_buffer;
	/* Feature selection (id and select flags) */
	struct efa_admin_get_set_feature_common_desc feature_common;
	/* Command-specific inline data */
	u32 raw[11];
};
/* Get Feature response; active union member is selected by feature_id */
struct efa_admin_get_feature_resp {
	/* Common Admin Queue completion descriptor */
	struct efa_admin_acq_common_desc acq_common_desc;
	union {
		u32 raw[14];
		struct efa_admin_feature_device_attr_desc device_attr;
		struct efa_admin_feature_aenq_desc aenq;
		struct efa_admin_feature_network_attr_desc network_attr;
		struct efa_admin_feature_queue_attr_desc queue_attr;
		struct efa_admin_hw_hints hw_hints;
	} u;
};
/* Set Feature command (opcode EFA_ADMIN_SET_FEATURE) */
struct efa_admin_set_feature_cmd {
	/* Common Admin Queue descriptor */
	struct efa_admin_aq_common_desc aq_common_descriptor;
	/* Control buffer for out-of-line feature data — TODO confirm usage */
	struct efa_admin_ctrl_buff_info control_buffer;
	/* Feature selection (id and select flags) */
	struct efa_admin_get_set_feature_common_desc feature_common;
	union {
		u32 raw[11];
		/* AENQ configuration */
		struct efa_admin_feature_aenq_desc aenq;
	} u;
};
/* Set Feature response */
struct efa_admin_set_feature_resp {
	/* Common Admin Queue completion descriptor */
	struct efa_admin_acq_common_desc acq_common_desc;
	union {
		u32 raw[14];
	} u;
};
/* Allocate Protection Domain command (opcode EFA_ADMIN_ALLOC_PD) */
struct efa_admin_alloc_pd_cmd {
	/* Common Admin Queue descriptor */
	struct efa_admin_aq_common_desc aq_common_descriptor;
};
/* Allocate Protection Domain response */
struct efa_admin_alloc_pd_resp {
	/* Common Admin Queue completion descriptor */
	struct efa_admin_acq_common_desc acq_common_desc;
	/* PD number */
	u16 pd;
	/* MBZ */
	u16 reserved;
};
/* Deallocate Protection Domain command (opcode EFA_ADMIN_DEALLOC_PD) */
struct efa_admin_dealloc_pd_cmd {
	/* Common Admin Queue descriptor */
	struct efa_admin_aq_common_desc aq_common_descriptor;
	/* PD number */
	u16 pd;
	/* MBZ */
	u16 reserved;
};
/* Deallocate Protection Domain response */
struct efa_admin_dealloc_pd_resp {
	/* Common Admin Queue completion descriptor */
	struct efa_admin_acq_common_desc acq_common_desc;
};
/* Allocate UAR command (opcode EFA_ADMIN_ALLOC_UAR) */
struct efa_admin_alloc_uar_cmd {
	/* Common Admin Queue descriptor */
	struct efa_admin_aq_common_desc aq_common_descriptor;
};
/* Allocate UAR response */
struct efa_admin_alloc_uar_resp {
	/* Common Admin Queue completion descriptor */
	struct efa_admin_acq_common_desc acq_common_desc;
	/* UAR number */
	u16 uar;
	/* MBZ */
	u16 reserved;
};
/* Deallocate UAR command (opcode EFA_ADMIN_DEALLOC_UAR) */
struct efa_admin_dealloc_uar_cmd {
	/* Common Admin Queue descriptor */
	struct efa_admin_aq_common_desc aq_common_descriptor;
	/* UAR number */
	u16 uar;
	/* MBZ */
	u16 reserved;
};
/* Deallocate UAR response */
struct efa_admin_dealloc_uar_resp {
	/* Common Admin Queue completion descriptor */
	struct efa_admin_acq_common_desc acq_common_desc;
};
/* asynchronous event notification (AENQ) groups */
enum efa_admin_aenq_group {
	EFA_ADMIN_FATAL_ERROR = 1,
	EFA_ADMIN_WARNING = 2,
	EFA_ADMIN_NOTIFICATION = 3,
	EFA_ADMIN_KEEP_ALIVE = 4,
	/* Number of defined AENQ groups (values above plus group 0) */
	EFA_ADMIN_AENQ_GROUPS_NUM = 5,
};
/*
 * Syndromes for AENQ NOTIFICATION events. Note: the "syndrom" spelling
 * is part of the established API name and must not be changed.
 */
enum efa_admin_aenq_notification_syndrom {
	EFA_ADMIN_SUSPEND = 0,
	EFA_ADMIN_RESUME = 1,
	EFA_ADMIN_UPDATE_HINTS = 2,
};
/* Response buffer written by the device for MMIO read-less requests */
struct efa_admin_mmio_req_read_less_resp {
	/* Request identifier */
	u16 req_id;
	/* Register offset that was read */
	u16 reg_off;
	/* value is valid when poll is cleared */
	u32 reg_val;
};
/*
 * Field masks for the inline bitfields documented next to each struct
 * member above (e.g. efa_admin_create_qp_cmd.flags).
 */
/* create_qp_cmd */
#define EFA_ADMIN_CREATE_QP_CMD_SQ_VIRT_MASK BIT(0)
#define EFA_ADMIN_CREATE_QP_CMD_RQ_VIRT_MASK BIT(1)
/* reg_mr_cmd */
#define EFA_ADMIN_REG_MR_CMD_PHYS_PAGE_SIZE_SHIFT_MASK GENMASK(4, 0)
#define EFA_ADMIN_REG_MR_CMD_MEM_ADDR_PHY_MODE_EN_MASK BIT(7)
#define EFA_ADMIN_REG_MR_CMD_LOCAL_WRITE_ENABLE_MASK BIT(0)
#define EFA_ADMIN_REG_MR_CMD_REMOTE_READ_ENABLE_MASK BIT(2)
/* create_cq_cmd */
#define EFA_ADMIN_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK BIT(5)
#define EFA_ADMIN_CREATE_CQ_CMD_VIRT_MASK BIT(6)
#define EFA_ADMIN_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK GENMASK(4, 0)
/* get_set_feature_common_desc */
#define EFA_ADMIN_GET_SET_FEATURE_COMMON_DESC_SELECT_MASK GENMASK(1, 0)
/* feature_device_attr_desc */
#define EFA_ADMIN_FEATURE_DEVICE_ATTR_DESC_RDMA_READ_MASK BIT(0)
#endif /* _EFA_ADMIN_CMDS_H_ */
|