/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2019, Intel Corporation. */

#ifndef _ICE_FLOW_H_
#define _ICE_FLOW_H_

#define ICE_FLOW_ENTRY_HANDLE_INVAL	0
#define ICE_FLOW_FLD_OFF_INVAL		0xffff

/* Generate flow hash field from flow field type(s) */
#define ICE_FLOW_HASH_IPV4	\
	(BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) | \
	 BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA))
#define ICE_FLOW_HASH_IPV6	\
	(BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA) | \
	 BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA))
#define ICE_FLOW_HASH_TCP_PORT	\
	(BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT) | \
	 BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT))
#define ICE_FLOW_HASH_UDP_PORT	\
	(BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT) | \
	 BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT))
#define ICE_FLOW_HASH_SCTP_PORT	\
	(BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT) | \
	 BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT))

#define ICE_HASH_INVALID	0
#define ICE_HASH_TCP_IPV4	(ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_TCP_PORT)
#define ICE_HASH_TCP_IPV6	(ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_TCP_PORT)
#define ICE_HASH_UDP_IPV4	(ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_UDP_PORT)
#define ICE_HASH_UDP_IPV6	(ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_UDP_PORT)
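
/* Worked example (illustrative note, not part of the original header): the
 * composite hash bitmaps above simply OR the per-field bits together, e.g.
 *
 *	ICE_HASH_TCP_IPV4 ==
 *		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) |
 *		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) |
 *		BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT) |
 *		BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT)
 *
 * Bitmaps of this form are the kind of hashed_flds values that can be passed
 * to ice_add_rss_cfg(), declared later in this header.
 */
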
/* Protocol header fields within a packet segment. A segment consists of one
 * or more protocol headers that make up a logical group. Each logical group
 * either encapsulates, or is encapsulated by, another group via tunneling or
 * encapsulation protocols used for network virtualization, such as GRE,
 * VXLAN, etc.
 */
enum ice_flow_seg_hdr {
	ICE_FLOW_SEG_HDR_NONE	= 0x00000000,
	ICE_FLOW_SEG_HDR_IPV4	= 0x00000004,
	ICE_FLOW_SEG_HDR_IPV6	= 0x00000008,
	ICE_FLOW_SEG_HDR_TCP	= 0x00000040,
	ICE_FLOW_SEG_HDR_UDP	= 0x00000080,
	ICE_FLOW_SEG_HDR_SCTP	= 0x00000100,
	ICE_FLOW_SEG_HDR_GRE	= 0x00000200,
};

enum ice_flow_field {
	/* L3 */
	ICE_FLOW_FIELD_IDX_IPV4_SA,
	ICE_FLOW_FIELD_IDX_IPV4_DA,
	ICE_FLOW_FIELD_IDX_IPV6_SA,
	ICE_FLOW_FIELD_IDX_IPV6_DA,
	/* L4 */
	ICE_FLOW_FIELD_IDX_TCP_SRC_PORT,
	ICE_FLOW_FIELD_IDX_TCP_DST_PORT,
	ICE_FLOW_FIELD_IDX_UDP_SRC_PORT,
	ICE_FLOW_FIELD_IDX_UDP_DST_PORT,
	ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT,
	ICE_FLOW_FIELD_IDX_SCTP_DST_PORT,
	/* GRE */
	ICE_FLOW_FIELD_IDX_GRE_KEYID,
	/* The total number of enums must not exceed 64 */
	ICE_FLOW_FIELD_IDX_MAX
};

/* Flow headers and fields for AVF support */
enum ice_flow_avf_hdr_field {
	/* Values 0 - 28 are reserved for future use */
	ICE_AVF_FLOW_FIELD_INVALID		= 0,
	ICE_AVF_FLOW_FIELD_UNICAST_IPV4_UDP	= 29,
	ICE_AVF_FLOW_FIELD_MULTICAST_IPV4_UDP,
	ICE_AVF_FLOW_FIELD_IPV4_UDP,
	ICE_AVF_FLOW_FIELD_IPV4_TCP_SYN_NO_ACK,
	ICE_AVF_FLOW_FIELD_IPV4_TCP,
	ICE_AVF_FLOW_FIELD_IPV4_SCTP,
	ICE_AVF_FLOW_FIELD_IPV4_OTHER,
	ICE_AVF_FLOW_FIELD_FRAG_IPV4,
	/* Values 37-38 are reserved */
	ICE_AVF_FLOW_FIELD_UNICAST_IPV6_UDP	= 39,
	ICE_AVF_FLOW_FIELD_MULTICAST_IPV6_UDP,
	ICE_AVF_FLOW_FIELD_IPV6_UDP,
	ICE_AVF_FLOW_FIELD_IPV6_TCP_SYN_NO_ACK,
	ICE_AVF_FLOW_FIELD_IPV6_TCP,
	ICE_AVF_FLOW_FIELD_IPV6_SCTP,
	ICE_AVF_FLOW_FIELD_IPV6_OTHER,
	ICE_AVF_FLOW_FIELD_FRAG_IPV6,
	ICE_AVF_FLOW_FIELD_RSVD47,
	ICE_AVF_FLOW_FIELD_FCOE_OX,
	ICE_AVF_FLOW_FIELD_FCOE_RX,
	ICE_AVF_FLOW_FIELD_FCOE_OTHER,
	/* Values 51-62 are reserved */
	ICE_AVF_FLOW_FIELD_L2_PAYLOAD		= 63,
	ICE_AVF_FLOW_FIELD_MAX
};

/* Supported RSS offloads. This macro is defined to support the
 * VIRTCHNL_OP_GET_RSS_HENA_CAPS op: the PF driver sends the RSS hardware
 * capabilities to the caller of this op.
 */
#define ICE_DEFAULT_RSS_HENA ( \
	BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_UDP) | \
	BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP) | \
	BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP) | \
	BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_OTHER) | \
	BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV4) | \
	BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_UDP) | \
	BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP) | \
	BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP) | \
	BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_OTHER) | \
	BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV6) | \
	BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP_SYN_NO_ACK) | \
	BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV4_UDP) | \
	BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV4_UDP) | \
	BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP_SYN_NO_ACK) | \
	BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV6_UDP) | \
	BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV6_UDP))

enum ice_flow_dir {
	ICE_FLOW_RX		= 0x02,
};

enum ice_flow_priority {
	ICE_FLOW_PRIO_LOW,
	ICE_FLOW_PRIO_NORMAL,
	ICE_FLOW_PRIO_HIGH
};

#define ICE_FLOW_SEG_MAX		2
#define ICE_FLOW_SEG_RAW_FLD_MAX	2
#define ICE_FLOW_FV_EXTRACT_SZ		2

#define ICE_FLOW_SET_HDRS(seg, val)	((seg)->hdrs |= (u32)(val))

struct ice_flow_seg_xtrct {
	u8 prot_id;	/* Protocol ID of extracted header field */
	u16 off;	/* Starting offset of the field in the header, in bytes */
	u8 idx;		/* Index of FV entry used */
	u8 disp;	/* Displacement of field in bits from FV entry's start */
};

enum ice_flow_fld_match_type {
	ICE_FLOW_FLD_TYPE_REG,		/* Value, mask */
	ICE_FLOW_FLD_TYPE_RANGE,	/* Value, mask, last (upper bound) */
	ICE_FLOW_FLD_TYPE_PREFIX,	/* IP address, prefix, size of prefix */
	ICE_FLOW_FLD_TYPE_SIZE,		/* Value, mask, size of match */
};

struct ice_flow_fld_loc {
	/* Describes offsets of field information relative to the beginning of
	 * the input buffer provided when adding flow entries.
	 */
	u16 val;	/* Offset where the value is located */
	u16 mask;	/* Offset where the mask/prefix value is located */
	u16 last;	/* Length, or offset where the upper value is located */
};

struct ice_flow_fld_info {
	enum ice_flow_fld_match_type type;
	/* Location from which to retrieve data in the input buffer */
	struct ice_flow_fld_loc src;
	/* Location at which to place the data in the final entry buffer */
	struct ice_flow_fld_loc entry;
	struct ice_flow_seg_xtrct xtrct;
};

struct ice_flow_seg_fld_raw {
	struct ice_flow_fld_info info;
	u16 off;	/* Offset from the start of the segment */
};

struct ice_flow_seg_info {
	u32 hdrs;	/* Bitmask indicating protocol headers present */
	u64 match;	/* Bitmask indicating header fields to be matched */
	u64 range;	/* Bitmask indicating header fields matched as ranges */

	struct ice_flow_fld_info fields[ICE_FLOW_FIELD_IDX_MAX];

	u8 raws_cnt;	/* Number of raw fields to be matched */
	struct ice_flow_seg_fld_raw raws[ICE_FLOW_SEG_RAW_FLD_MAX];
};

/* This structure describes a flow entry, and is tracked only in this file */
struct ice_flow_entry {
	struct list_head l_entry;

	u64 id;
	struct ice_flow_prof *prof;
	/* Flow entry's content */
	void *entry;
	enum ice_flow_priority priority;
	u16 vsi_handle;
	u16 entry_sz;
};

#define ICE_FLOW_ENTRY_HNDL(e) ((u64)e)
#define ICE_FLOW_ENTRY_PTR(h) ((struct ice_flow_entry *)(h))
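
/* Note (illustrative, not part of the original header): a flow entry handle
 * is just the entry pointer cast to u64, so the two macros above are inverses
 * of each other:
 *
 *	struct ice_flow_entry *e = ...;
 *	u64 h = ICE_FLOW_ENTRY_HNDL(e);
 *	struct ice_flow_entry *back = ICE_FLOW_ENTRY_PTR(h);	// back == e
 *
 * ICE_FLOW_ENTRY_HANDLE_INVAL (0) denotes a handle that refers to no entry.
 */
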
struct ice_flow_prof {
	struct list_head l_entry;

	u64 id;
	enum ice_flow_dir dir;
	u8 segs_cnt;

	/* Keep track of flow entries associated with this flow profile */
	struct mutex entries_lock;
	struct list_head entries;

	struct ice_flow_seg_info segs[ICE_FLOW_SEG_MAX];

	/* software VSI handles referenced by this flow profile */
	DECLARE_BITMAP(vsis, ICE_MAX_VSI);
};

struct ice_rss_cfg {
	struct list_head l_entry;
	/* bitmap of VSIs added to the RSS entry */
	DECLARE_BITMAP(vsis, ICE_MAX_VSI);
	u64 hashed_flds;
	u32 packet_hdr;
};

enum ice_status
ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
		  u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
		  struct ice_flow_prof **prof);
enum ice_status
ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id);
enum ice_status
ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
		   u64 entry_id, u16 vsi, enum ice_flow_priority prio,
		   void *data, u64 *entry_h);
enum ice_status
ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_h);
void
ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
		 u16 val_loc, u16 mask_loc, u16 last_loc, bool range);
void
ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
		     u16 val_loc, u16 mask_loc);
void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle);
enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle);
enum ice_status
ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds);
enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle);
enum ice_status
ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
		u32 addl_hdrs);
u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs);
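
/* Usage sketch (illustrative only, based on the prototypes above; it assumes
 * an initialized struct ice_hw *hw, a block ID "blk" (enum ice_block),
 * caller-chosen prof_id/entry_id, a VSI handle "vsi_handle", and an input
 * buffer "data" whose IPv4 source-address value/mask live at the hypothetical
 * offsets val_off/mask_off; return values are ignored here for brevity):
 *
 *	struct ice_flow_seg_info seg = { 0 };
 *	struct ice_flow_prof *prof;
 *	u64 entry_h;
 *
 *	ICE_FLOW_SET_HDRS(&seg, ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_TCP);
 *	ice_flow_set_fld(&seg, ICE_FLOW_FIELD_IDX_IPV4_SA, val_off, mask_off,
 *			 ICE_FLOW_FLD_OFF_INVAL, false);
 *	ice_flow_add_prof(hw, blk, ICE_FLOW_RX, prof_id, &seg, 1, &prof);
 *	ice_flow_add_entry(hw, blk, prof_id, entry_id, vsi_handle,
 *			   ICE_FLOW_PRIO_NORMAL, data, &entry_h);
 *	...
 *	ice_flow_rem_entry(hw, blk, entry_h);
 *	ice_flow_rem_prof(hw, blk, prof_id);
 */
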
#endif /* _ICE_FLOW_H_ */