1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /* Copyright (c) 2013-2022, Intel Corporation. */
3
4 #ifndef _VIRTCHNL_H_
5 #define _VIRTCHNL_H_
6
7 /* Description:
8 * This header file describes the Virtual Function (VF) - Physical Function
9 * (PF) communication protocol used by the drivers for all devices starting
10 * from our 40G product line
11 *
12 * Admin queue buffer usage:
13 * desc->opcode is always aqc_opc_send_msg_to_pf
14 * flags, retval, datalen, and data addr are all used normally.
15 * The Firmware copies the cookie fields when sending messages between the
16 * PF and VF, but uses all other fields internally. Due to this limitation,
17 * we must send all messages as "indirect", i.e. using an external buffer.
18 *
 * All the VSI indexes are relative to the VF. Each VF can have a maximum
 * of three VSIs. All the queue indexes are relative to the VSI. Each VF
 * can have a maximum of sixteen queues for all of its VSIs.
22 *
23 * The PF is required to return a status code in v_retval for all messages
24 * except RESET_VF, which does not require any response. The returned value
25 * is of virtchnl_status_code type, defined here.
26 *
27 * In general, VF driver initialization should roughly follow the order of
28 * these opcodes. The VF driver must first validate the API version of the
29 * PF driver, then request a reset, then get resources, then configure
30 * queues and interrupts. After these operations are complete, the VF
31 * driver may start its queues, optionally add MAC and VLAN filters, and
32 * process traffic.
33 */
34
35 /* START GENERIC DEFINES
36 * Need to ensure the following enums and defines hold the same meaning and
37 * value in current and future projects
38 */
39
40 /* Error Codes */
/* Status codes the PF returns in v_retval for every request except
 * RESET_VF (see the file header).  These live in the GENERIC DEFINES
 * section, so values cross the VF/PF wire and must not be renumbered.
 */
enum virtchnl_status_code {
	VIRTCHNL_STATUS_SUCCESS = 0,
	VIRTCHNL_STATUS_ERR_PARAM = -5,
	VIRTCHNL_STATUS_ERR_NO_MEMORY = -18,
	VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH = -38,
	VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR = -39,
	VIRTCHNL_STATUS_ERR_INVALID_VF_ID = -40,
	VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR = -53,
	VIRTCHNL_STATUS_ERR_NOT_SUPPORTED = -64,
};
51
/* Backward compatibility: old names kept as aliases of the current
 * status-code names above.
 */
#define VIRTCHNL_ERR_PARAM VIRTCHNL_STATUS_ERR_PARAM
#define VIRTCHNL_STATUS_NOT_SUPPORTED VIRTCHNL_STATUS_ERR_NOT_SUPPORTED

/* Bit positions fed to BIT() in enum virtchnl_link_speed below.
 * Note the positions are not in speed order: 2.5GB uses position 0 and
 * 5GB uses position 7 (presumably added after the original set --
 * positions 1-6 ascend from 100MB to 25GB).
 */
#define VIRTCHNL_LINK_SPEED_2_5GB_SHIFT 0x0
#define VIRTCHNL_LINK_SPEED_100MB_SHIFT 0x1
#define VIRTCHNL_LINK_SPEED_1000MB_SHIFT 0x2
#define VIRTCHNL_LINK_SPEED_10GB_SHIFT 0x3
#define VIRTCHNL_LINK_SPEED_40GB_SHIFT 0x4
#define VIRTCHNL_LINK_SPEED_20GB_SHIFT 0x5
#define VIRTCHNL_LINK_SPEED_25GB_SHIFT 0x6
#define VIRTCHNL_LINK_SPEED_5GB_SHIFT 0x7
64
/* Link speeds as single-bit flags built from the *_SHIFT positions
 * above; UNKNOWN is 0 (no bit set).
 */
enum virtchnl_link_speed {
	VIRTCHNL_LINK_SPEED_UNKNOWN = 0,
	VIRTCHNL_LINK_SPEED_100MB = BIT(VIRTCHNL_LINK_SPEED_100MB_SHIFT),
	VIRTCHNL_LINK_SPEED_1GB = BIT(VIRTCHNL_LINK_SPEED_1000MB_SHIFT),
	VIRTCHNL_LINK_SPEED_10GB = BIT(VIRTCHNL_LINK_SPEED_10GB_SHIFT),
	VIRTCHNL_LINK_SPEED_40GB = BIT(VIRTCHNL_LINK_SPEED_40GB_SHIFT),
	VIRTCHNL_LINK_SPEED_20GB = BIT(VIRTCHNL_LINK_SPEED_20GB_SHIFT),
	VIRTCHNL_LINK_SPEED_25GB = BIT(VIRTCHNL_LINK_SPEED_25GB_SHIFT),
	VIRTCHNL_LINK_SPEED_2_5GB = BIT(VIRTCHNL_LINK_SPEED_2_5GB_SHIFT),
	VIRTCHNL_LINK_SPEED_5GB = BIT(VIRTCHNL_LINK_SPEED_5GB_SHIFT),
};
76
77 /* for hsplit_0 field of Rx HMC context */
78 /* deprecated with AVF 1.0 */
/* Header-split modes; values are powers of two and are carried in
 * virtchnl_rxq_info.rx_split_pos (deprecated with AVF 1.0).
 */
enum virtchnl_rx_hsplit {
	VIRTCHNL_RX_HSPLIT_NO_SPLIT = 0,
	VIRTCHNL_RX_HSPLIT_SPLIT_L2 = 1,
	VIRTCHNL_RX_HSPLIT_SPLIT_IP = 2,
	VIRTCHNL_RX_HSPLIT_SPLIT_TCP_UDP = 4,
	VIRTCHNL_RX_HSPLIT_SPLIT_SCTP = 8,
};
86
87 /* END GENERIC DEFINES */
88
89 /* Opcodes for VF-PF communication. These are placed in the v_opcode field
90 * of the virtchnl_msg structure.
91 */
enum virtchnl_ops {
	/* The PF sends status change events to VFs using
	 * the VIRTCHNL_OP_EVENT opcode.
	 * VFs send requests to the PF using the other ops.
	 * Use of "advanced opcode" features must be negotiated as part of
	 * capabilities exchange and are not considered part of base mode
	 * feature set.
	 */
	VIRTCHNL_OP_UNKNOWN = 0,
	VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */
	VIRTCHNL_OP_RESET_VF = 2,
	VIRTCHNL_OP_GET_VF_RESOURCES = 3,
	VIRTCHNL_OP_CONFIG_TX_QUEUE = 4,
	VIRTCHNL_OP_CONFIG_RX_QUEUE = 5,
	VIRTCHNL_OP_CONFIG_VSI_QUEUES = 6,
	VIRTCHNL_OP_CONFIG_IRQ_MAP = 7,
	VIRTCHNL_OP_ENABLE_QUEUES = 8,
	VIRTCHNL_OP_DISABLE_QUEUES = 9,
	VIRTCHNL_OP_ADD_ETH_ADDR = 10,
	VIRTCHNL_OP_DEL_ETH_ADDR = 11,
	VIRTCHNL_OP_ADD_VLAN = 12,
	VIRTCHNL_OP_DEL_VLAN = 13,
	VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14,
	VIRTCHNL_OP_GET_STATS = 15,
	VIRTCHNL_OP_RSVD = 16,
	VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */
	/* opcode 18 is not defined here; opcode 19 is reserved */
	VIRTCHNL_OP_IWARP = 20, /* advanced opcode */
	/* the *_RDMA names alias the *_IWARP opcodes (same wire values) */
	VIRTCHNL_OP_RDMA = VIRTCHNL_OP_IWARP,
	VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP = 21, /* advanced opcode */
	VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP = VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP,
	VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP = 22, /* advanced opcode */
	VIRTCHNL_OP_RELEASE_RDMA_IRQ_MAP = VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP,
	VIRTCHNL_OP_CONFIG_RSS_KEY = 23,
	VIRTCHNL_OP_CONFIG_RSS_LUT = 24,
	VIRTCHNL_OP_GET_RSS_HENA_CAPS = 25,
	VIRTCHNL_OP_SET_RSS_HENA = 26,
	VIRTCHNL_OP_ENABLE_VLAN_STRIPPING = 27,
	VIRTCHNL_OP_DISABLE_VLAN_STRIPPING = 28,
	VIRTCHNL_OP_REQUEST_QUEUES = 29,
	VIRTCHNL_OP_ENABLE_CHANNELS = 30,
	VIRTCHNL_OP_DISABLE_CHANNELS = 31,
	VIRTCHNL_OP_ADD_CLOUD_FILTER = 32,
	VIRTCHNL_OP_DEL_CLOUD_FILTER = 33,
	/* opcode 34 - 43 are reserved */
	VIRTCHNL_OP_GET_SUPPORTED_RXDIDS = 44,
	VIRTCHNL_OP_ADD_RSS_CFG = 45,
	VIRTCHNL_OP_DEL_RSS_CFG = 46,
	VIRTCHNL_OP_ADD_FDIR_FILTER = 47,
	VIRTCHNL_OP_DEL_FDIR_FILTER = 48,
	/* opcodes 49 and 50 are not defined here */
	VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS = 51,
	VIRTCHNL_OP_ADD_VLAN_V2 = 52,
	VIRTCHNL_OP_DEL_VLAN_V2 = 53,
	VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 = 54,
	VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2 = 55,
	VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2 = 56,
	VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2 = 57,
	/* sentinel -- not a wire opcode; keep last */
	VIRTCHNL_OP_MAX,
};
150
/* These macros are used to generate compilation errors if a structure/union
 * is not exactly the correct length.  If the size is wrong the initializer
 * divides by zero (a constant-expression error); otherwise the macro just
 * declares an enum that is never used.
 *
 * Fix: the union variant's enum tag previously read "asset" instead of
 * "assert"; the tag is never referenced, so correcting the spelling is
 * safe and keeps the two macros symmetrical.
 */
#define VIRTCHNL_CHECK_STRUCT_LEN(n, X) enum virtchnl_static_assert_enum_##X \
	{ virtchnl_static_assert_##X = (n)/((sizeof(struct X) == (n)) ? 1 : 0) }
#define VIRTCHNL_CHECK_UNION_LEN(n, X) enum virtchnl_static_assert_enum_##X \
	{ virtchnl_static_assert_##X = (n)/((sizeof(union X) == (n)) ? 1 : 0) }
160
161 /* Message descriptions and data structures. */
162
163 /* VIRTCHNL_OP_VERSION
164 * VF posts its version number to the PF. PF responds with its version number
165 * in the same format, along with a return code.
166 * Reply from PF has its major/minor versions also in param0 and param1.
167 * If there is a major version mismatch, then the VF cannot operate.
168 * If there is a minor version mismatch, then the VF can operate but should
169 * add a warning to the system log.
170 *
171 * This enum element MUST always be specified as == 1, regardless of other
172 * changes in the API. The PF must always respond to this message without
173 * error regardless of version mismatch.
174 */
#define VIRTCHNL_VERSION_MAJOR 1
#define VIRTCHNL_VERSION_MINOR 1
/* minor 0 predates capability negotiation: a 1.0 VF sends
 * GET_VF_RESOURCES with no parameters (see that opcode's comment below)
 */
#define VIRTCHNL_VERSION_MINOR_NO_VF_CAPS 0

/* Payload of VIRTCHNL_OP_VERSION, used in both directions. */
struct virtchnl_version_info {
	u32 major;
	u32 minor;
};

VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_version_info);
185
/* Both macros take a pointer to struct virtchnl_version_info.  The
 * parameter is named _ver in both for consistency (V10 previously used
 * _v); macro parameter names are internal, so callers are unaffected.
 */
#define VF_IS_V10(_ver) (((_ver)->major == 1) && ((_ver)->minor == 0))
#define VF_IS_V11(_ver) (((_ver)->major == 1) && ((_ver)->minor == 1))
188
189 /* VIRTCHNL_OP_RESET_VF
190 * VF sends this request to PF with no parameters
191 * PF does NOT respond! VF driver must delay then poll VFGEN_RSTAT register
192 * until reset completion is indicated. The admin queue must be reinitialized
193 * after this operation.
194 *
195 * When reset is complete, PF must ensure that all queues in all VSIs associated
196 * with the VF are stopped, all queue configurations in the HMC are set to 0,
197 * and all MAC and VLAN filters (except the default MAC address) on all VSIs
198 * are cleared.
199 */
200
201 /* VSI types that use VIRTCHNL interface for VF-PF communication. VSI_SRIOV
202 * vsi_type should always be 6 for backward compatibility. Add other fields
203 * as needed.
204 */
enum virtchnl_vsi_type {
	VIRTCHNL_VSI_TYPE_INVALID = 0,
	VIRTCHNL_VSI_SRIOV = 6,	/* must stay 6 for backward compatibility */
};
209
210 /* VIRTCHNL_OP_GET_VF_RESOURCES
211 * Version 1.0 VF sends this request to PF with no parameters
212 * Version 1.1 VF sends this request to PF with u32 bitmap of its capabilities
213 * PF responds with an indirect message containing
214 * virtchnl_vf_resource and one or more
215 * virtchnl_vsi_resource structures.
216 */
217
/* One per-VSI entry in the GET_VF_RESOURCES response. */
struct virtchnl_vsi_resource {
	u16 vsi_id;		/* VSI index, relative to the VF (see file header) */
	u16 num_queue_pairs;	/* Tx/Rx queue pairs allocated to this VSI */

	/* see enum virtchnl_vsi_type */
	s32 vsi_type;
	u16 qset_handle;	/* NOTE(review): opaque here -- presumably a
				 * HW queue-set handle; confirm against PF
				 */
	u8 default_mac_addr[ETH_ALEN];
};

VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource);
229
230 /* VF capability flags
231 * VIRTCHNL_VF_OFFLOAD_L2 flag is inclusive of base mode L2 offloads including
232 * TX/RX Checksum offloading and TSO for non-tunnelled packets.
233 */
#define VIRTCHNL_VF_OFFLOAD_L2 BIT(0)
#define VIRTCHNL_VF_OFFLOAD_RDMA BIT(1)
/* legacy alias of the RDMA capability bit */
#define VIRTCHNL_VF_CAP_RDMA VIRTCHNL_VF_OFFLOAD_RDMA
/* bit 2 is not defined in this header */
#define VIRTCHNL_VF_OFFLOAD_RSS_AQ BIT(3)
#define VIRTCHNL_VF_OFFLOAD_RSS_REG BIT(4)
#define VIRTCHNL_VF_OFFLOAD_WB_ON_ITR BIT(5)
#define VIRTCHNL_VF_OFFLOAD_REQ_QUEUES BIT(6)
/* used to negotiate communicating link speeds in Mbps */
#define VIRTCHNL_VF_CAP_ADV_LINK_SPEED BIT(7)
/* bits 8 - 14 are not defined in this header */
#define VIRTCHNL_VF_OFFLOAD_VLAN_V2 BIT(15)
#define VIRTCHNL_VF_OFFLOAD_VLAN BIT(16)
#define VIRTCHNL_VF_OFFLOAD_RX_POLLING BIT(17)
#define VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 BIT(18)
#define VIRTCHNL_VF_OFFLOAD_RSS_PF BIT(19)
#define VIRTCHNL_VF_OFFLOAD_ENCAP BIT(20)
#define VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM BIT(21)
#define VIRTCHNL_VF_OFFLOAD_RX_ENCAP_CSUM BIT(22)
#define VIRTCHNL_VF_OFFLOAD_ADQ BIT(23)
/* bit 24 is not defined in this header */
#define VIRTCHNL_VF_OFFLOAD_USO BIT(25)
#define VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC BIT(26)
#define VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF BIT(27)
#define VIRTCHNL_VF_OFFLOAD_FDIR_PF BIT(28)

/* the capability flags that make up the "base mode" feature set */
#define VF_BASE_MODE_OFFLOADS (VIRTCHNL_VF_OFFLOAD_L2 | \
			       VIRTCHNL_VF_OFFLOAD_VLAN | \
			       VIRTCHNL_VF_OFFLOAD_RSS_PF)
260
/* Head of the GET_VF_RESOURCES response. */
struct virtchnl_vf_resource {
	u16 num_vsis;
	u16 num_queue_pairs;
	u16 max_vectors;
	u16 max_mtu;

	u32 vf_cap_flags;	/* VIRTCHNL_VF_* capability flags above */
	u32 rss_key_size;
	u32 rss_lut_size;

	/* variable-length trailer: the PF returns "one or more" entries
	 * (see opcode comment above); declared [1] to keep the advertised
	 * struct size below
	 */
	struct virtchnl_vsi_resource vsi_res[1];
};

VIRTCHNL_CHECK_STRUCT_LEN(36, virtchnl_vf_resource);
275
276 /* VIRTCHNL_OP_CONFIG_TX_QUEUE
277 * VF sends this message to set up parameters for one TX queue.
278 * External data buffer contains one instance of virtchnl_txq_info.
279 * PF configures requested queue and returns a status code.
280 */
281
282 /* Tx queue config info */
struct virtchnl_txq_info {
	u16 vsi_id;
	u16 queue_id;		/* relative to the VSI (see file header) */
	u16 ring_len;		/* number of descriptors, multiple of 8 */
	u16 headwb_enabled;	/* deprecated with AVF 1.0 */
	u64 dma_ring_addr;	/* DMA address of the descriptor ring */
	u64 dma_headwb_addr;	/* deprecated with AVF 1.0 */
};

VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_txq_info);
293
294 /* VIRTCHNL_OP_CONFIG_RX_QUEUE
295 * VF sends this message to set up parameters for one RX queue.
296 * External data buffer contains one instance of virtchnl_rxq_info.
297 * PF configures requested queue and returns a status code.
298 */
299
300 /* Rx queue config info */
struct virtchnl_rxq_info {
	u16 vsi_id;
	u16 queue_id;		/* relative to the VSI (see file header) */
	u32 ring_len;		/* number of descriptors, multiple of 32 */
	u16 hdr_size;
	u16 splithdr_enabled;	/* deprecated with AVF 1.0 */
	u32 databuffer_size;
	u32 max_pkt_size;
	u8 pad0;
	u8 rxdid;		/* Rx descriptor format id; presumably only
				 * meaningful when VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC
				 * was negotiated -- TODO confirm against PF
				 */
	u8 pad1[2];
	u64 dma_ring_addr;	/* DMA address of the descriptor ring */

	/* see enum virtchnl_rx_hsplit; deprecated with AVF 1.0 */
	s32 rx_split_pos;
	u32 pad2;
};

VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_rxq_info);
320
321 /* VIRTCHNL_OP_CONFIG_VSI_QUEUES
322 * VF sends this message to set parameters for all active TX and RX queues
323 * associated with the specified VSI.
324 * PF configures queues and returns status.
325 * If the number of queues specified is greater than the number of queues
326 * associated with the VSI, an error is returned and no queues are configured.
327 * NOTE: The VF is not required to configure all queues in a single request.
328 * It may send multiple messages. PF drivers must correctly handle all VF
329 * requests.
330 */
/* One Tx/Rx queue pair, as carried by VIRTCHNL_OP_CONFIG_VSI_QUEUES. */
struct virtchnl_queue_pair_info {
	/* NOTE: vsi_id and queue_id should be identical for both queues. */
	struct virtchnl_txq_info txq;
	struct virtchnl_rxq_info rxq;
};

VIRTCHNL_CHECK_STRUCT_LEN(64, virtchnl_queue_pair_info);
338
struct virtchnl_vsi_queue_config_info {
	u16 vsi_id;
	u16 num_queue_pairs;	/* number of entries in qpair[] */
	u32 pad;
	/* variable-length: declared [1], actually num_queue_pairs entries */
	struct virtchnl_queue_pair_info qpair[1];
};

VIRTCHNL_CHECK_STRUCT_LEN(72, virtchnl_vsi_queue_config_info);
347
348 /* VIRTCHNL_OP_REQUEST_QUEUES
349 * VF sends this message to request the PF to allocate additional queues to
350 * this VF. Each VF gets a guaranteed number of queues on init but asking for
351 * additional queues must be negotiated. This is a best effort request as it
352 * is possible the PF does not have enough queues left to support the request.
353 * If the PF cannot support the number requested it will respond with the
354 * maximum number it is able to support. If the request is successful, PF will
355 * then reset the VF to institute required changes.
356 */
357
358 /* VF resource request */
struct virtchnl_vf_res_request {
	u16 num_queue_pairs;	/* total queue pairs requested (best effort;
				 * see opcode comment above)
				 */
	/* NOTE(review): unlike the other message structs there is no
	 * VIRTCHNL_CHECK_STRUCT_LEN() for this one -- consider adding
	 * VIRTCHNL_CHECK_STRUCT_LEN(2, virtchnl_vf_res_request)
	 */
};
362
363 /* VIRTCHNL_OP_CONFIG_IRQ_MAP
364 * VF uses this message to map vectors to queues.
365 * The rxq_map and txq_map fields are bitmaps used to indicate which queues
366 * are to be associated with the specified vector.
367 * The "other" causes are always mapped to vector 0. The VF may not request
368 * that vector 0 be used for traffic.
369 * PF configures interrupt mapping and returns status.
370 * NOTE: due to hardware requirements, all active queues (both TX and RX)
371 * should be mapped to interrupts, even if the driver intends to operate
372 * only in polling mode. In this case the interrupt may be disabled, but
373 * the ITR timer will still run to trigger writebacks.
374 */
struct virtchnl_vector_map {
	u16 vsi_id;
	u16 vector_id;	/* vector 0 carries the "other" causes and may not
			 * be requested for traffic (see comment above)
			 */
	u16 rxq_map;	/* bitmap of Rx queues mapped to this vector */
	u16 txq_map;	/* bitmap of Tx queues mapped to this vector */
	u16 rxitr_idx;
	u16 txitr_idx;
};

VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_vector_map);
385
struct virtchnl_irq_map_info {
	u16 num_vectors;	/* number of entries in vecmap[] */
	/* variable-length: declared [1], actually num_vectors entries */
	struct virtchnl_vector_map vecmap[1];
};

VIRTCHNL_CHECK_STRUCT_LEN(14, virtchnl_irq_map_info);
392
393 /* VIRTCHNL_OP_ENABLE_QUEUES
394 * VIRTCHNL_OP_DISABLE_QUEUES
395 * VF sends these message to enable or disable TX/RX queue pairs.
396 * The queues fields are bitmaps indicating which queues to act upon.
397 * (Currently, we only support 16 queues per VF, but we make the field
398 * u32 to allow for expansion.)
399 * PF performs requested action and returns status.
400 * NOTE: The VF is not required to enable/disable all queues in a single
401 * request. It may send multiple messages.
402 * PF drivers must correctly handle all VF requests.
403 */
struct virtchnl_queue_select {
	u16 vsi_id;
	u16 pad;
	u32 rx_queues;	/* bitmap of Rx queues to act upon */
	u32 tx_queues;	/* bitmap of Tx queues to act upon */
};

VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_select);
412
413 /* VIRTCHNL_OP_ADD_ETH_ADDR
414 * VF sends this message in order to add one or more unicast or multicast
415 * address filters for the specified VSI.
416 * PF adds the filters and returns status.
417 */
418
419 /* VIRTCHNL_OP_DEL_ETH_ADDR
420 * VF sends this message in order to remove one or more unicast or multicast
421 * filters for the specified VSI.
422 * PF removes the filters and returns status.
423 */
424
425 /* VIRTCHNL_ETHER_ADDR_LEGACY
426 * Prior to adding the @type member to virtchnl_ether_addr, there were 2 pad
427 * bytes. Moving forward all VF drivers should not set type to
428 * VIRTCHNL_ETHER_ADDR_LEGACY. This is only here to not break previous/legacy
429 * behavior. The control plane function (i.e. PF) can use a best effort method
430 * of tracking the primary/device unicast in this case, but there is no
431 * guarantee and functionality depends on the implementation of the PF.
432 */
433
434 /* VIRTCHNL_ETHER_ADDR_PRIMARY
435 * All VF drivers should set @type to VIRTCHNL_ETHER_ADDR_PRIMARY for the
436 * primary/device unicast MAC address filter for VIRTCHNL_OP_ADD_ETH_ADDR and
437 * VIRTCHNL_OP_DEL_ETH_ADDR. This allows for the underlying control plane
438 * function (i.e. PF) to accurately track and use this MAC address for
439 * displaying on the host and for VM/function reset.
440 */
441
442 /* VIRTCHNL_ETHER_ADDR_EXTRA
443 * All VF drivers should set @type to VIRTCHNL_ETHER_ADDR_EXTRA for any extra
444 * unicast and/or multicast filters that are being added/deleted via
445 * VIRTCHNL_OP_DEL_ETH_ADDR/VIRTCHNL_OP_ADD_ETH_ADDR respectively.
446 */
struct virtchnl_ether_addr {
	u8 addr[ETH_ALEN];
	/* only the low two bits of type are valid (see TYPE_MASK); see the
	 * comments above for when each type should be used
	 */
	u8 type;
#define VIRTCHNL_ETHER_ADDR_LEGACY 0
#define VIRTCHNL_ETHER_ADDR_PRIMARY 1
#define VIRTCHNL_ETHER_ADDR_EXTRA 2
#define VIRTCHNL_ETHER_ADDR_TYPE_MASK 3 /* first two bits of type are valid */
	u8 pad;
};

VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_ether_addr);
458
struct virtchnl_ether_addr_list {
	u16 vsi_id;
	u16 num_elements;	/* number of entries in list[] */
	/* variable-length: declared [1], actually num_elements entries */
	struct virtchnl_ether_addr list[1];
};

VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_ether_addr_list);
466
467 /* VIRTCHNL_OP_ADD_VLAN
468 * VF sends this message to add one or more VLAN tag filters for receives.
469 * PF adds the filters and returns status.
470 * If a port VLAN is configured by the PF, this operation will return an
471 * error to the VF.
472 */
473
474 /* VIRTCHNL_OP_DEL_VLAN
475 * VF sends this message to remove one or more VLAN tag filters for receives.
476 * PF removes the filters and returns status.
477 * If a port VLAN is configured by the PF, this operation will return an
478 * error to the VF.
479 */
480
struct virtchnl_vlan_filter_list {
	u16 vsi_id;
	u16 num_elements;	/* number of entries in vlan_id[] */
	/* variable-length: declared [1], actually num_elements entries */
	u16 vlan_id[1];
};

VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_vlan_filter_list);
488
489 /* This enum is used for all of the VIRTCHNL_VF_OFFLOAD_VLAN_V2_CAPS related
490 * structures and opcodes.
491 *
492 * VIRTCHNL_VLAN_UNSUPPORTED - This field is not supported and if a VF driver
493 * populates it the PF should return VIRTCHNL_STATUS_ERR_NOT_SUPPORTED.
494 *
495 * VIRTCHNL_VLAN_ETHERTYPE_8100 - This field supports 0x8100 ethertype.
496 * VIRTCHNL_VLAN_ETHERTYPE_88A8 - This field supports 0x88A8 ethertype.
497 * VIRTCHNL_VLAN_ETHERTYPE_9100 - This field supports 0x9100 ethertype.
498 *
499 * VIRTCHNL_VLAN_ETHERTYPE_AND - Used when multiple ethertypes can be supported
500 * by the PF concurrently. For example, if the PF can support
501 * VIRTCHNL_VLAN_ETHERTYPE_8100 AND VIRTCHNL_VLAN_ETHERTYPE_88A8 filters it
502 * would OR the following bits:
503 *
 * VIRTCHNL_VLAN_ETHERTYPE_8100 |
505 * VIRTCHNL_VLAN_ETHERTYPE_88A8 |
506 * VIRTCHNL_VLAN_ETHERTYPE_AND;
507 *
508 * The VF would interpret this as VLAN filtering can be supported on both 0x8100
509 * and 0x88A8 VLAN ethertypes.
510 *
 * VIRTCHNL_VLAN_ETHERTYPE_XOR - Used when only a single ethertype can be
 * supported
512 * by the PF concurrently. For example if the PF can support
513 * VIRTCHNL_VLAN_ETHERTYPE_8100 XOR VIRTCHNL_VLAN_ETHERTYPE_88A8 stripping
514 * offload it would OR the following bits:
515 *
516 * VIRTCHNL_VLAN_ETHERTYPE_8100 |
517 * VIRTCHNL_VLAN_ETHERTYPE_88A8 |
518 * VIRTCHNL_VLAN_ETHERTYPE_XOR;
519 *
520 * The VF would interpret this as VLAN stripping can be supported on either
521 * 0x8100 or 0x88a8 VLAN ethertypes. So when requesting VLAN stripping via
522 * VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 the specified ethertype will override
523 * the previously set value.
524 *
525 * VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1 - Used to tell the VF to insert and/or
526 * strip the VLAN tag using the L2TAG1 field of the Tx/Rx descriptors.
527 *
528 * VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2 - Used to tell the VF to insert hardware
529 * offloaded VLAN tags using the L2TAG2 field of the Tx descriptor.
530 *
 * VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2 - Used to tell the VF to strip hardware
 * offloaded VLAN tags using the L2TAG2_2 field of the Rx descriptor.
533 *
534 * VIRTCHNL_VLAN_PRIO - This field supports VLAN priority bits. This is used for
535 * VLAN filtering if the underlying PF supports it.
536 *
 * VIRTCHNL_VLAN_TOGGLE - This field is used to say whether a
538 * certain VLAN capability can be toggled. For example if the underlying PF/CP
539 * allows the VF to toggle VLAN filtering, stripping, and/or insertion it should
540 * set this bit along with the supported ethertypes.
541 */
/* See the long comment above for the meaning of each bit. */
enum virtchnl_vlan_support {
	VIRTCHNL_VLAN_UNSUPPORTED = 0,
	VIRTCHNL_VLAN_ETHERTYPE_8100 = BIT(0),
	VIRTCHNL_VLAN_ETHERTYPE_88A8 = BIT(1),
	VIRTCHNL_VLAN_ETHERTYPE_9100 = BIT(2),
	/* bits 3 - 7 are not defined in this header */
	VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1 = BIT(8),
	VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2 = BIT(9),
	VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2 = BIT(10),
	/* bits 11 - 23 are not defined in this header */
	VIRTCHNL_VLAN_PRIO = BIT(24),
	/* bits 25 - 27 are not defined in this header */
	VIRTCHNL_VLAN_FILTER_MASK = BIT(28),
	VIRTCHNL_VLAN_ETHERTYPE_AND = BIT(29),
	VIRTCHNL_VLAN_ETHERTYPE_XOR = BIT(30),
	VIRTCHNL_VLAN_TOGGLE = BIT(31),
};
556
557 /* This structure is used as part of the VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS
558 * for filtering, insertion, and stripping capabilities.
559 *
560 * If only outer capabilities are supported (for filtering, insertion, and/or
561 * stripping) then this refers to the outer most or single VLAN from the VF's
562 * perspective.
563 *
564 * If only inner capabilities are supported (for filtering, insertion, and/or
565 * stripping) then this refers to the outer most or single VLAN from the VF's
566 * perspective. Functionally this is the same as if only outer capabilities are
567 * supported. The VF driver is just forced to use the inner fields when
568 * adding/deleting filters and enabling/disabling offloads (if supported).
569 *
570 * If both outer and inner capabilities are supported (for filtering, insertion,
571 * and/or stripping) then outer refers to the outer most or single VLAN and
572 * inner refers to the second VLAN, if it exists, in the packet.
573 *
574 * There is no support for tunneled VLAN offloads, so outer or inner are never
575 * referring to a tunneled packet from the VF's perspective.
576 */
struct virtchnl_vlan_supported_caps {
	u32 outer;	/* bitmap of virtchnl_vlan_support bits */
	u32 inner;	/* bitmap of virtchnl_vlan_support bits */
};
581
582 /* The PF populates these fields based on the supported VLAN filtering. If a
583 * field is VIRTCHNL_VLAN_UNSUPPORTED then it's not supported and the PF will
584 * reject any VIRTCHNL_OP_ADD_VLAN_V2 or VIRTCHNL_OP_DEL_VLAN_V2 messages using
585 * the unsupported fields.
586 *
587 * Also, a VF is only allowed to toggle its VLAN filtering setting if the
588 * VIRTCHNL_VLAN_TOGGLE bit is set.
589 *
590 * The ethertype(s) specified in the ethertype_init field are the ethertypes
591 * enabled for VLAN filtering. VLAN filtering in this case refers to the outer
592 * most VLAN from the VF's perspective. If both inner and outer filtering are
593 * allowed then ethertype_init only refers to the outer most VLAN as only
594 * VLAN ethertype supported for inner VLAN filtering is
595 * VIRTCHNL_VLAN_ETHERTYPE_8100. By default, inner VLAN filtering is disabled
596 * when both inner and outer filtering are allowed.
597 *
598 * The max_filters field tells the VF how many VLAN filters it's allowed to have
599 * at any one time. If it exceeds this amount and tries to add another filter,
600 * then the request will be rejected by the PF. To prevent failures, the VF
601 * should keep track of how many VLAN filters it has added and not attempt to
602 * add more than max_filters.
603 */
struct virtchnl_vlan_filtering_caps {
	struct virtchnl_vlan_supported_caps filtering_support;
	u32 ethertype_init;	/* ethertype(s) initially enabled for
				 * filtering (see comment above)
				 */
	u16 max_filters;	/* max VLAN filters this VF may have at once */
	u8 pad[2];
};

VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vlan_filtering_caps);
612
613 /* This enum is used for the virtchnl_vlan_offload_caps structure to specify
614 * if the PF supports a different ethertype for stripping and insertion.
615 *
616 * VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION - The ethertype(s) specified
 * for stripping affect the ethertype(s) specified for insertion and vice versa
618 * as well. If the VF tries to configure VLAN stripping via
619 * VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 with VIRTCHNL_VLAN_ETHERTYPE_8100 then
620 * that will be the ethertype for both stripping and insertion.
621 *
622 * VIRTCHNL_ETHERTYPE_MATCH_NOT_REQUIRED - The ethertype(s) specified for
 * stripping do not affect the ethertype(s) specified for insertion and vice
 * versa.
625 */
/* See the comment above for the semantics of each value. */
enum virtchnl_vlan_ethertype_match {
	VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION = 0,
	VIRTCHNL_ETHERTYPE_MATCH_NOT_REQUIRED = 1,
};
630
631 /* The PF populates these fields based on the supported VLAN offloads. If a
632 * field is VIRTCHNL_VLAN_UNSUPPORTED then it's not supported and the PF will
633 * reject any VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 or
634 * VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2 messages using the unsupported fields.
635 *
636 * Also, a VF is only allowed to toggle its VLAN offload setting if the
 * VIRTCHNL_VLAN_TOGGLE bit is set.
638 *
639 * The VF driver needs to be aware of how the tags are stripped by hardware and
640 * inserted by the VF driver based on the level of offload support. The PF will
641 * populate these fields based on where the VLAN tags are expected to be
 * offloaded via the VIRTCHNL_VLAN_TAG_LOCATION_* bits. The VF will need to
643 * interpret these fields. See the definition of the
644 * VIRTCHNL_VLAN_TAG_LOCATION_* bits above the virtchnl_vlan_support
645 * enumeration.
646 */
struct virtchnl_vlan_offload_caps {
	struct virtchnl_vlan_supported_caps stripping_support;
	struct virtchnl_vlan_supported_caps insertion_support;
	u32 ethertype_init;	/* ethertype(s) initially enabled for offloads */
	u8 ethertype_match;	/* see enum virtchnl_vlan_ethertype_match */
	u8 pad[3];
};

VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_vlan_offload_caps);
656
657 /* VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS
658 * VF sends this message to determine its VLAN capabilities.
659 *
660 * PF will mark which capabilities it supports based on hardware support and
661 * current configuration. For example, if a port VLAN is configured the PF will
662 * not allow outer VLAN filtering, stripping, or insertion to be configured so
663 * it will block these features from the VF.
664 *
665 * The VF will need to cross reference its capabilities with the PFs
666 * capabilities in the response message from the PF to determine the VLAN
667 * support.
668 */
/* Response payload for VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS. */
struct virtchnl_vlan_caps {
	struct virtchnl_vlan_filtering_caps filtering;
	struct virtchnl_vlan_offload_caps offloads;
};

VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_vlan_caps);
675
/* A single VLAN tag match: TCI, optional TCI mask, and raw TPID. */
struct virtchnl_vlan {
	u16 tci; /* tci[15:13] = PCP and tci[11:0] = VID */
	u16 tci_mask; /* only valid if VIRTCHNL_VLAN_FILTER_MASK set in
		       * filtering caps
		       */
	u16 tpid; /* 0x8100, 0x88a8, etc. and only type(s) set in
		   * filtering caps. Note that tpid here does not refer to
		   * VIRTCHNL_VLAN_ETHERTYPE_*, but it refers to the
		   * actual 2-byte VLAN TPID
		   */
	u8 pad[2];
};

VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_vlan);
690
/* Inner/outer pair forming one VLAN filter.  See the comment above
 * virtchnl_vlan_supported_caps for what inner/outer mean here.
 */
struct virtchnl_vlan_filter {
	struct virtchnl_vlan inner;
	struct virtchnl_vlan outer;
	u8 pad[16];
};

VIRTCHNL_CHECK_STRUCT_LEN(32, virtchnl_vlan_filter);
698
699 /* VIRTCHNL_OP_ADD_VLAN_V2
700 * VIRTCHNL_OP_DEL_VLAN_V2
701 *
702 * VF sends these messages to add/del one or more VLAN tag filters for Rx
703 * traffic.
704 *
705 * The PF attempts to add the filters and returns status.
706 *
707 * The VF should only ever attempt to add/del virtchnl_vlan_filter(s) using the
708 * supported fields negotiated via VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS.
709 */
struct virtchnl_vlan_filter_list_v2 {
	u16 vport_id;
	u16 num_elements;	/* number of entries in filters[] */
	u8 pad[4];
	/* variable-length: declared [1], actually num_elements entries */
	struct virtchnl_vlan_filter filters[1];
};

VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_vlan_filter_list_v2);
718
719 /* VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2
720 * VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2
721 * VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2
722 * VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2
723 *
724 * VF sends this message to enable or disable VLAN stripping or insertion. It
725 * also needs to specify an ethertype. The VF knows which VLAN ethertypes are
726 * allowed and whether or not it's allowed to enable/disable the specific
727 * offload via the VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS message. The VF needs to
728 * parse the virtchnl_vlan_caps.offloads fields to determine which offload
729 * messages are allowed.
730 *
731 * For example, if the PF populates the virtchnl_vlan_caps.offloads in the
732 * following manner the VF will be allowed to enable and/or disable 0x8100 inner
733 * VLAN insertion and/or stripping via the opcodes listed above. Inner in this
734 * case means the outer most or single VLAN from the VF's perspective. This is
735 * because no outer offloads are supported. See the comments above the
736 * virtchnl_vlan_supported_caps structure for more details.
737 *
738 * virtchnl_vlan_caps.offloads.stripping_support.inner =
739 * VIRTCHNL_VLAN_TOGGLE |
740 * VIRTCHNL_VLAN_ETHERTYPE_8100;
741 *
742 * virtchnl_vlan_caps.offloads.insertion_support.inner =
743 * VIRTCHNL_VLAN_TOGGLE |
744 * VIRTCHNL_VLAN_ETHERTYPE_8100;
745 *
746 * In order to enable inner (again note that in this case inner is the outer
747 * most or single VLAN from the VF's perspective) VLAN stripping for 0x8100
748 * VLANs, the VF would populate the virtchnl_vlan_setting structure in the
749 * following manner and send the VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 message.
750 *
751 * virtchnl_vlan_setting.inner_ethertype_setting =
752 * VIRTCHNL_VLAN_ETHERTYPE_8100;
753 *
754 * virtchnl_vlan_setting.vport_id = vport_id or vsi_id assigned to the VF on
755 * initialization.
756 *
757 * The reason that VLAN TPID(s) are not being used for the
758 * outer_ethertype_setting and inner_ethertype_setting fields is because it's
759 * possible a device could support VLAN insertion and/or stripping offload on
760 * multiple ethertypes concurrently, so this method allows a VF to request
761 * multiple ethertypes in one message using the virtchnl_vlan_support
762 * enumeration.
763 *
764 * For example, if the PF populates the virtchnl_vlan_caps.offloads in the
765 * following manner the VF will be allowed to enable 0x8100 and 0x88a8 outer
766 * VLAN insertion and stripping simultaneously. The
767 * virtchnl_vlan_caps.offloads.ethertype_match field will also have to be
768 * populated based on what the PF can support.
769 *
770 * virtchnl_vlan_caps.offloads.stripping_support.outer =
771 * VIRTCHNL_VLAN_TOGGLE |
772 * VIRTCHNL_VLAN_ETHERTYPE_8100 |
773 * VIRTCHNL_VLAN_ETHERTYPE_88A8 |
774 * VIRTCHNL_VLAN_ETHERTYPE_AND;
775 *
776 * virtchnl_vlan_caps.offloads.insertion_support.outer =
777 * VIRTCHNL_VLAN_TOGGLE |
778 * VIRTCHNL_VLAN_ETHERTYPE_8100 |
779 * VIRTCHNL_VLAN_ETHERTYPE_88A8 |
780 * VIRTCHNL_VLAN_ETHERTYPE_AND;
781 *
782 * In order to enable outer VLAN stripping for 0x8100 and 0x88a8 VLANs, the VF
 * would populate the virtchnl_vlan_setting structure in the following manner
784 * and send the VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 message.
785 *
 * virtchnl_vlan_setting.outer_ethertype_setting =
 *			VIRTCHNL_VLAN_ETHERTYPE_8100 |
 *			VIRTCHNL_VLAN_ETHERTYPE_88A8;
789 *
790 * virtchnl_vlan_setting.vport_id = vport_id or vsi_id assigned to the VF on
791 * initialization.
792 *
793 * There is also the case where a PF and the underlying hardware can support
794 * VLAN offloads on multiple ethertypes, but not concurrently. For example, if
795 * the PF populates the virtchnl_vlan_caps.offloads in the following manner the
796 * VF will be allowed to enable and/or disable 0x8100 XOR 0x88a8 outer VLAN
797 * offloads. The ethertypes must match for stripping and insertion.
798 *
799 * virtchnl_vlan_caps.offloads.stripping_support.outer =
800 * VIRTCHNL_VLAN_TOGGLE |
801 * VIRTCHNL_VLAN_ETHERTYPE_8100 |
802 * VIRTCHNL_VLAN_ETHERTYPE_88A8 |
803 * VIRTCHNL_VLAN_ETHERTYPE_XOR;
804 *
805 * virtchnl_vlan_caps.offloads.insertion_support.outer =
806 * VIRTCHNL_VLAN_TOGGLE |
807 * VIRTCHNL_VLAN_ETHERTYPE_8100 |
808 * VIRTCHNL_VLAN_ETHERTYPE_88A8 |
809 * VIRTCHNL_VLAN_ETHERTYPE_XOR;
810 *
811 * virtchnl_vlan_caps.offloads.ethertype_match =
812 * VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION;
813 *
 * In order to enable outer VLAN stripping for 0x88a8 VLANs, the VF would
 * populate the virtchnl_vlan_setting structure in the following manner and send
 * the VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 message. Also, this will change the
 * ethertype for VLAN insertion if it's enabled. So, for completeness, a
 * VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2 with the same ethertype should be sent.
819 *
 * virtchnl_vlan_setting.outer_ethertype_setting = VIRTCHNL_VLAN_ETHERTYPE_88A8;
821 *
822 * virtchnl_vlan_setting.vport_id = vport_id or vsi_id assigned to the VF on
823 * initialization.
824 */
/* Payload for VIRTCHNL_OP_ENABLE/DISABLE_VLAN_STRIPPING_V2 and
 * VIRTCHNL_OP_ENABLE/DISABLE_VLAN_INSERTION_V2.
 *
 * The ethertype setting fields take virtchnl_vlan_support bitmasks rather
 * than raw TPID values so that multiple ethertypes can be requested in one
 * message (see the usage examples in the comment block above).
 */
struct virtchnl_vlan_setting {
	u32 outer_ethertype_setting; /* virtchnl_vlan_support bits, outer tag */
	u32 inner_ethertype_setting; /* virtchnl_vlan_support bits, inner tag */
	u16 vport_id; /* vport_id/vsi_id assigned to the VF on initialization */
	u8 pad[6]; /* explicit padding to keep the wire size at 16 bytes */
};

VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vlan_setting);
833
/* VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE
 * VF sends VSI id and flags.
 * PF returns status code in retval.
 * Note: we assume that broadcast accept mode is always enabled.
 */
struct virtchnl_promisc_info {
	u16 vsi_id; /* VSI to configure; relative to the VF */
	u16 flags;  /* OR of the FLAG_VF_*_PROMISC bits below */
};

VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_promisc_info);

/* Valid bits for virtchnl_promisc_info.flags */
#define FLAG_VF_UNICAST_PROMISC 0x00000001
#define FLAG_VF_MULTICAST_PROMISC 0x00000002
848
849 /* VIRTCHNL_OP_GET_STATS
850 * VF sends this message to request stats for the selected VSI. VF uses
851 * the virtchnl_queue_select struct to specify the VSI. The queue_id
852 * field is ignored by the PF.
853 *
854 * PF replies with struct eth_stats in an external buffer.
855 */
856
857 /* VIRTCHNL_OP_CONFIG_RSS_KEY
858 * VIRTCHNL_OP_CONFIG_RSS_LUT
859 * VF sends these messages to configure RSS. Only supported if both PF
860 * and VF drivers set the VIRTCHNL_VF_OFFLOAD_RSS_PF bit during
861 * configuration negotiation. If this is the case, then the RSS fields in
862 * the VF resource struct are valid.
863 * Both the key and LUT are initialized to 0 by the PF, meaning that
864 * RSS is effectively disabled until set up by the VF.
865 */
/* Payload for VIRTCHNL_OP_CONFIG_RSS_KEY (see the opcode comment above). */
struct virtchnl_rss_key {
	u16 vsi_id;  /* VSI to configure; relative to the VF */
	u16 key_len; /* number of valid bytes in key[] */
	u8 key[1]; /* RSS hash key, packed bytes */
	/* NOTE(review): key[] is a one-element trailing array, not a C99
	 * flexible array member, so sizeof() already counts one key byte;
	 * the message-length validation below relies on this (key_len - 1).
	 */
};

VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_key);
873
/* Payload for VIRTCHNL_OP_CONFIG_RSS_LUT (see the opcode comment above). */
struct virtchnl_rss_lut {
	u16 vsi_id;      /* VSI to configure; relative to the VF */
	u16 lut_entries; /* number of valid bytes in lut[] */
	u8 lut[1]; /* RSS lookup table */
	/* NOTE(review): lut[] is a one-element trailing array; sizeof()
	 * counts one entry, matching the (lut_entries - 1) length math in
	 * virtchnl_vc_validate_vf_msg().
	 */
};

VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_lut);
881
/* VIRTCHNL_OP_GET_RSS_HENA_CAPS
 * VIRTCHNL_OP_SET_RSS_HENA
 * VF sends these messages to get and set the hash filter enable bits for RSS.
 * By default, the PF sets these to all possible traffic types that the
 * hardware supports. The VF can query this value if it wants to change the
 * traffic types that are hashed by the hardware.
 */
struct virtchnl_rss_hena {
	u64 hena; /* bitmask of traffic types enabled for RSS hashing */
};

VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_rss_hena);
894
/* VIRTCHNL_OP_ENABLE_CHANNELS
 * VIRTCHNL_OP_DISABLE_CHANNELS
 * VF sends these messages to enable or disable channels based on
 * the user specified queue count and queue offset for each traffic class.
 * This struct encompasses all the information that the PF needs from
 * VF to create a channel.
 */
struct virtchnl_channel_info {
	u16 count; /* number of queues in a channel */
	u16 offset; /* queues in a channel start from 'offset' */
	u32 pad; /* explicit padding so max_tx_rate is 8-byte aligned */
	u64 max_tx_rate; /* presumably in Mbps, as elsewhere in this file - TODO confirm */
};

VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_channel_info);
910
/* Per-traffic-class channel list carried by VIRTCHNL_OP_ENABLE_CHANNELS. */
struct virtchnl_tc_info {
	u32 num_tc; /* number of valid entries in list[] */
	u32 pad;    /* explicit padding so list[] is 8-byte aligned */
	/* one-element trailing array; actual length is num_tc entries */
	struct virtchnl_channel_info list[1];
};

VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_tc_info);
918
919 /* VIRTCHNL_ADD_CLOUD_FILTER
920 * VIRTCHNL_DEL_CLOUD_FILTER
921 * VF sends these messages to add or delete a cloud filter based on the
922 * user specified match and action filters. These structures encompass
923 * all the information that the PF needs from the VF to add/delete a
924 * cloud filter.
925 */
926
/* L2/L3/L4 match fields for a cloud filter. All multi-byte fields are in
 * network byte order (__be*).
 */
struct virtchnl_l4_spec {
	u8 src_mac[ETH_ALEN];
	u8 dst_mac[ETH_ALEN];
	__be16 vlan_id;
	__be16 pad; /* reserved for future use */
	/* four 32-bit words per address, large enough for IPv6; presumably
	 * IPv4 flows use only part of the array - TODO confirm against PF
	 */
	__be32 src_ip[4];
	__be32 dst_ip[4];
	__be16 src_port;
	__be16 dst_port;
};

VIRTCHNL_CHECK_STRUCT_LEN(52, virtchnl_l4_spec);
939
/* Flow match specification for cloud filters; fixed at 128 bytes on the wire
 * so new spec types can be added without changing the message size.
 */
union virtchnl_flow_spec {
	struct virtchnl_l4_spec tcp_spec;
	u8 buffer[128]; /* reserved for future use */
};

VIRTCHNL_CHECK_UNION_LEN(128, virtchnl_flow_spec);
946
/* Action a filter performs on match. Carried on the wire as an s32 (see
 * virtchnl_filter.action and virtchnl_filter_action.type).
 */
enum virtchnl_action {
	/* action types */
	VIRTCHNL_ACTION_DROP = 0,
	VIRTCHNL_ACTION_TC_REDIRECT,
	VIRTCHNL_ACTION_PASSTHRU,
	VIRTCHNL_ACTION_QUEUE,    /* uses virtchnl_filter_action.act_conf.queue */
	VIRTCHNL_ACTION_Q_REGION, /* uses act_conf.queue (index + region) */
	VIRTCHNL_ACTION_MARK,     /* uses act_conf.mark_id */
	VIRTCHNL_ACTION_COUNT,    /* uses act_conf.count */
};
957
/* Flow type of a cloud filter; carried on the wire as an s32 (see
 * virtchnl_filter.flow_type).
 */
enum virtchnl_flow_type {
	/* flow types */
	VIRTCHNL_TCP_V4_FLOW = 0,
	VIRTCHNL_TCP_V6_FLOW,
};
963
/* Payload for VIRTCHNL_OP_ADD/DEL_CLOUD_FILTER (see the opcode comment
 * above). 'data' holds the match values and 'mask' presumably selects which
 * bits of 'data' are significant - TODO confirm against the PF driver.
 */
struct virtchnl_filter {
	union virtchnl_flow_spec data;
	union virtchnl_flow_spec mask;

	/* see enum virtchnl_flow_type */
	s32 flow_type;

	/* see enum virtchnl_action */
	s32 action;
	u32 action_meta; /* action argument, e.g. queue index or TC - confirm */
	u8 field_flags;
	u8 pad[3]; /* explicit padding to keep the wire size at 272 bytes */
};

VIRTCHNL_CHECK_STRUCT_LEN(272, virtchnl_filter);
979
/* Response payload for VIRTCHNL_OP_GET_SUPPORTED_RXDIDS; the VF request
 * carries no payload (see virtchnl_vc_validate_vf_msg below).
 */
struct virtchnl_supported_rxdids {
	u64 supported_rxdids; /* bitmask of RX descriptor IDs the PF supports */
};
983
/* VIRTCHNL_OP_EVENT
 * PF sends this message to inform the VF driver of events that may affect it.
 * No direct response is expected from the VF, though it may generate other
 * messages in response to this one.
 */
enum virtchnl_event_codes {
	VIRTCHNL_EVENT_UNKNOWN = 0,
	VIRTCHNL_EVENT_LINK_CHANGE,     /* link state/speed changed */
	VIRTCHNL_EVENT_RESET_IMPENDING, /* PF is about to reset the VF */
	VIRTCHNL_EVENT_PF_DRIVER_CLOSE, /* PF driver is shutting down */
};

/* Values for virtchnl_pf_event.severity */
#define PF_EVENT_SEVERITY_INFO 0
#define PF_EVENT_SEVERITY_CERTAIN_DOOM 255
998
/* Payload of a VIRTCHNL_OP_EVENT message sent PF -> VF. */
struct virtchnl_pf_event {
	/* see enum virtchnl_event_codes */
	s32 event;
	union {
		/* If the PF driver does not support the new speed reporting
		 * capabilities then use link_event else use link_event_adv to
		 * get the speed and link information. The ability to understand
		 * new speeds is indicated by setting the capability flag
		 * VIRTCHNL_VF_CAP_ADV_LINK_SPEED in vf_cap_flags parameter
		 * in virtchnl_vf_resource struct and can be used to determine
		 * which link event struct to use below.
		 */
		struct {
			enum virtchnl_link_speed link_speed;
			bool link_status;
			u8 pad[3];
		} link_event;
		struct {
			/* link_speed provided in Mbps */
			u32 link_speed;
			u8 link_status;
			u8 pad[3];
		} link_event_adv;
	} event_data;

	/* see the PF_EVENT_SEVERITY_* defines above */
	s32 severity;
};

VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_pf_event);
1028
1029 /* used to specify if a ceq_idx or aeq_idx is invalid */
1030 #define VIRTCHNL_RDMA_INVALID_QUEUE_IDX 0xFFFF
1031 /* VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP
1032 * VF uses this message to request PF to map RDMA vectors to RDMA queues.
1033 * The request for this originates from the VF RDMA driver through
1034 * a client interface between VF LAN and VF RDMA driver.
 * A vector could have both an AEQ and a CEQ attached to it, although
 * there is a single AEQ per VF RDMA instance, in which case
 * most vectors will have a VIRTCHNL_RDMA_INVALID_QUEUE_IDX for the AEQ and a
 * valid idx for the CEQ. There will never be a case where multiple CEQs are
 * attached to a single vector.
1040 * PF configures interrupt mapping and returns status.
1041 */
1042
/* One vector-to-queue mapping entry for VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP
 * (see the opcode comment above).
 */
struct virtchnl_rdma_qv_info {
	u32 v_idx; /* msix_vector */
	u16 ceq_idx; /* set to VIRTCHNL_RDMA_INVALID_QUEUE_IDX if invalid */
	u16 aeq_idx; /* set to VIRTCHNL_RDMA_INVALID_QUEUE_IDX if invalid */
	u8 itr_idx;
	u8 pad[3]; /* explicit padding to keep the wire size at 12 bytes */
};

VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_rdma_qv_info);
1052
/* Payload for VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP: a counted list of
 * vector-to-queue mappings.
 */
struct virtchnl_rdma_qvlist_info {
	u32 num_vectors; /* number of valid qv_info[] entries */
	/* one-element trailing array; actual length is num_vectors entries */
	struct virtchnl_rdma_qv_info qv_info[1];
};

VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_rdma_qvlist_info);
1059
1060 /* VF reset states - these are written into the RSTAT register:
1061 * VFGEN_RSTAT on the VF
1062 * When the PF initiates a reset, it writes 0
1063 * When the reset is complete, it writes 1
1064 * When the PF detects that the VF has recovered, it writes 2
1065 * VF checks this register periodically to determine if a reset has occurred,
1066 * then polls it to know when the reset is complete.
1067 * If either the PF or VF reads the register while the hardware
1068 * is in a reset state, it will return DEADBEEF, which, when masked
1069 * will result in 3.
1070 */
/* Values written into the VFGEN_RSTAT register; see the comment above. */
enum virtchnl_vfr_states {
	VIRTCHNL_VFR_INPROGRESS = 0, /* PF wrote 0: reset in progress */
	VIRTCHNL_VFR_COMPLETED,      /* PF wrote 1: reset complete */
	VIRTCHNL_VFR_VFACTIVE,       /* PF wrote 2: VF has recovered */
};
1076
/* Type of RSS algorithm; carried on the wire as an s32 (see
 * virtchnl_rss_cfg.rss_algorithm).
 */
enum virtchnl_rss_algorithm {
	VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC	= 0,
	VIRTCHNL_RSS_ALG_R_ASYMMETRIC		= 1,
	VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC	= 2,
	VIRTCHNL_RSS_ALG_XOR_SYMMETRIC		= 3,
};
1084
#define VIRTCHNL_MAX_NUM_PROTO_HDRS 32
/* Each virtchnl_proto_hdr_field value encodes its owning header type in the
 * high bits and its field_selector bit index in the low PROTO_HDR_SHIFT bits:
 * value = (proto_hdr_type << PROTO_HDR_SHIFT) + bit_index.
 */
#define PROTO_HDR_SHIFT 5
#define PROTO_HDR_FIELD_START(proto_hdr_type) ((proto_hdr_type) << PROTO_HDR_SHIFT)
#define PROTO_HDR_FIELD_MASK ((1UL << PROTO_HDR_SHIFT) - 1)

/* VF use these macros to configure each protocol header.
 * Specify which protocol headers and protocol header fields base on
 * virtchnl_proto_hdr_type and virtchnl_proto_hdr_field.
 * @param hdr: a struct of virtchnl_proto_hdr
 * @param hdr_type: ETH/IPV4/TCP, etc
 * @param field: SRC/DST/TEID/SPI, etc
 */
/* Set/clear/test a single field bit; only the low 5 bits of 'field' select
 * the bit, so the caller must also check the header type (see
 * VIRTCHNL_TEST_PROTO_HDR below for the combined check).
 */
#define VIRTCHNL_ADD_PROTO_HDR_FIELD(hdr, field) \
	((hdr)->field_selector |= BIT((field) & PROTO_HDR_FIELD_MASK))
#define VIRTCHNL_DEL_PROTO_HDR_FIELD(hdr, field) \
	((hdr)->field_selector &= ~BIT((field) & PROTO_HDR_FIELD_MASK))
#define VIRTCHNL_TEST_PROTO_HDR_FIELD(hdr, val) \
	((hdr)->field_selector & BIT((val) & PROTO_HDR_FIELD_MASK))
#define VIRTCHNL_GET_PROTO_HDR_FIELD(hdr) ((hdr)->field_selector)

#define VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, hdr_type, field) \
	(VIRTCHNL_ADD_PROTO_HDR_FIELD(hdr, \
		VIRTCHNL_PROTO_HDR_ ## hdr_type ## _ ## field))
#define VIRTCHNL_DEL_PROTO_HDR_FIELD_BIT(hdr, hdr_type, field) \
	(VIRTCHNL_DEL_PROTO_HDR_FIELD(hdr, \
		VIRTCHNL_PROTO_HDR_ ## hdr_type ## _ ## field))

#define VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, hdr_type) \
	((hdr)->type = VIRTCHNL_PROTO_HDR_ ## hdr_type)
/* NOTE(review): SET stores the unshifted enum value, while GET shifts the
 * stored type right by PROTO_HDR_SHIFT - these are not inverses. GET only
 * makes sense for values that were stored pre-shifted; confirm intended
 * usage before relying on it.
 */
#define VIRTCHNL_GET_PROTO_HDR_TYPE(hdr) \
	(((hdr)->type) >> PROTO_HDR_SHIFT)
/* 'val' is a virtchnl_proto_hdr_field value; its high bits name the type */
#define VIRTCHNL_TEST_PROTO_HDR_TYPE(hdr, val) \
	((hdr)->type == ((s32)((val) >> PROTO_HDR_SHIFT)))
/* true if 'hdr' is of the type encoded in 'val' AND has that field bit set */
#define VIRTCHNL_TEST_PROTO_HDR(hdr, val) \
	(VIRTCHNL_TEST_PROTO_HDR_TYPE((hdr), (val)) && \
	 VIRTCHNL_TEST_PROTO_HDR_FIELD((hdr), (val)))
1121
/* Protocol header type within a packet segment. A segment consists of one or
 * more protocol headers that make up a logical group of protocol headers. Each
 * logical group of protocol headers encapsulates or is encapsulated using/by
 * tunneling or encapsulation protocols for network virtualization.
 *
 * Carried on the wire as an s32 in virtchnl_proto_hdr.type; also forms the
 * high bits of each virtchnl_proto_hdr_field value via PROTO_HDR_FIELD_START.
 */
enum virtchnl_proto_hdr_type {
	VIRTCHNL_PROTO_HDR_NONE,
	VIRTCHNL_PROTO_HDR_ETH,
	VIRTCHNL_PROTO_HDR_S_VLAN,
	VIRTCHNL_PROTO_HDR_C_VLAN,
	VIRTCHNL_PROTO_HDR_IPV4,
	VIRTCHNL_PROTO_HDR_IPV6,
	VIRTCHNL_PROTO_HDR_TCP,
	VIRTCHNL_PROTO_HDR_UDP,
	VIRTCHNL_PROTO_HDR_SCTP,
	VIRTCHNL_PROTO_HDR_GTPU_IP,
	VIRTCHNL_PROTO_HDR_GTPU_EH,
	VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_DWN,
	VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_UP,
	VIRTCHNL_PROTO_HDR_PPPOE,
	VIRTCHNL_PROTO_HDR_L2TPV3,
	VIRTCHNL_PROTO_HDR_ESP,
	VIRTCHNL_PROTO_HDR_AH,
	VIRTCHNL_PROTO_HDR_PFCP,
};
1147
/* Protocol header field within a protocol header.
 * Each value encodes (header type << PROTO_HDR_SHIFT) + bit index, so the
 * low 5 bits select a bit in virtchnl_proto_hdr.field_selector and the high
 * bits identify the owning virtchnl_proto_hdr_type (see the macros above).
 */
enum virtchnl_proto_hdr_field {
	/* ETHER */
	VIRTCHNL_PROTO_HDR_ETH_SRC =
		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_ETH),
	VIRTCHNL_PROTO_HDR_ETH_DST,
	VIRTCHNL_PROTO_HDR_ETH_ETHERTYPE,
	/* S-VLAN */
	VIRTCHNL_PROTO_HDR_S_VLAN_ID =
		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_S_VLAN),
	/* C-VLAN */
	VIRTCHNL_PROTO_HDR_C_VLAN_ID =
		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_C_VLAN),
	/* IPV4 */
	VIRTCHNL_PROTO_HDR_IPV4_SRC =
		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_IPV4),
	VIRTCHNL_PROTO_HDR_IPV4_DST,
	VIRTCHNL_PROTO_HDR_IPV4_DSCP,
	VIRTCHNL_PROTO_HDR_IPV4_TTL,
	VIRTCHNL_PROTO_HDR_IPV4_PROT,
	/* IPV6 */
	VIRTCHNL_PROTO_HDR_IPV6_SRC =
		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_IPV6),
	VIRTCHNL_PROTO_HDR_IPV6_DST,
	VIRTCHNL_PROTO_HDR_IPV6_TC,
	VIRTCHNL_PROTO_HDR_IPV6_HOP_LIMIT,
	VIRTCHNL_PROTO_HDR_IPV6_PROT,
	/* TCP */
	VIRTCHNL_PROTO_HDR_TCP_SRC_PORT =
		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_TCP),
	VIRTCHNL_PROTO_HDR_TCP_DST_PORT,
	/* UDP */
	VIRTCHNL_PROTO_HDR_UDP_SRC_PORT =
		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_UDP),
	VIRTCHNL_PROTO_HDR_UDP_DST_PORT,
	/* SCTP */
	VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT =
		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_SCTP),
	VIRTCHNL_PROTO_HDR_SCTP_DST_PORT,
	/* GTPU_IP */
	VIRTCHNL_PROTO_HDR_GTPU_IP_TEID =
		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_GTPU_IP),
	/* GTPU_EH */
	VIRTCHNL_PROTO_HDR_GTPU_EH_PDU =
		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_GTPU_EH),
	VIRTCHNL_PROTO_HDR_GTPU_EH_QFI,
	/* PPPOE */
	VIRTCHNL_PROTO_HDR_PPPOE_SESS_ID =
		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_PPPOE),
	/* L2TPV3 */
	VIRTCHNL_PROTO_HDR_L2TPV3_SESS_ID =
		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_L2TPV3),
	/* ESP */
	VIRTCHNL_PROTO_HDR_ESP_SPI =
		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_ESP),
	/* AH */
	VIRTCHNL_PROTO_HDR_AH_SPI =
		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_AH),
	/* PFCP */
	VIRTCHNL_PROTO_HDR_PFCP_S_FIELD =
		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_PFCP),
	VIRTCHNL_PROTO_HDR_PFCP_SEID,
};
1211
/* One protocol header descriptor used by RSS and FDIR configuration. */
struct virtchnl_proto_hdr {
	/* see enum virtchnl_proto_hdr_type */
	s32 type;
	u32 field_selector; /* a bit mask to select field for header type */
	/* Binary buffer in network order for the specific header type.
	 * For example, if type = VIRTCHNL_PROTO_HDR_IPV4, an IPv4
	 * header is expected to be copied into the buffer.
	 */
	u8 buffer[64];
};

VIRTCHNL_CHECK_STRUCT_LEN(72, virtchnl_proto_hdr);
1225
/* Ordered list of protocol headers describing a packet pattern for RSS and
 * FDIR configuration.
 */
struct virtchnl_proto_hdrs {
	/* specifies where the protocol headers start from:
	 * 0 - from the outer layer
	 * 1 - from the first inner layer
	 * 2 - from the second inner layer
	 * ...
	 */
	u8 tunnel_level;
	u8 pad[3]; /* explicit padding so count is 4-byte aligned */
	int count; /* the proto layers must < VIRTCHNL_MAX_NUM_PROTO_HDRS */
	struct virtchnl_proto_hdr proto_hdr[VIRTCHNL_MAX_NUM_PROTO_HDRS];
};

VIRTCHNL_CHECK_STRUCT_LEN(2312, virtchnl_proto_hdrs);
1241
/* Payload for VIRTCHNL_OP_ADD_RSS_CFG / VIRTCHNL_OP_DEL_RSS_CFG. */
struct virtchnl_rss_cfg {
	struct virtchnl_proto_hdrs proto_hdrs;	   /* protocol headers */

	/* see enum virtchnl_rss_algorithm; rss algorithm type */
	s32 rss_algorithm;
	u8 reserved[128];			   /* reserve for future */
};

VIRTCHNL_CHECK_STRUCT_LEN(2444, virtchnl_rss_cfg);
1251
/* action configuration for FDIR */
struct virtchnl_filter_action {
	/* see enum virtchnl_action type */
	s32 type;
	/* which member of act_conf is valid depends on 'type' above */
	union {
		/* used for queue and qgroup action */
		struct {
			u16 index;
			u8 region;
		} queue;
		/* used for count action */
		struct {
			/* share counter ID with other flow rules */
			u8 shared;
			u32 id; /* counter ID */
		} count;
		/* used for mark action */
		u32 mark_id;
		u8 reserve[32]; /* pins the union (and wire) size */
	} act_conf;
};

VIRTCHNL_CHECK_STRUCT_LEN(36, virtchnl_filter_action);
1275
#define VIRTCHNL_MAX_NUM_ACTIONS  8

/* Ordered set of actions applied by one FDIR rule. */
struct virtchnl_filter_action_set {
	/* action number must be less than VIRTCHNL_MAX_NUM_ACTIONS */
	int count;
	struct virtchnl_filter_action actions[VIRTCHNL_MAX_NUM_ACTIONS];
};

VIRTCHNL_CHECK_STRUCT_LEN(292, virtchnl_filter_action_set);
1285
/* pattern and action for FDIR rule */
struct virtchnl_fdir_rule {
	struct virtchnl_proto_hdrs proto_hdrs;	 /* match pattern */
	struct virtchnl_filter_action_set action_set; /* actions on match */
};

VIRTCHNL_CHECK_STRUCT_LEN(2604, virtchnl_fdir_rule);
1293
1294 /* Status returned to VF after VF requests FDIR commands
1295 * VIRTCHNL_FDIR_SUCCESS
1296 * VF FDIR related request is successfully done by PF
1297 * The request can be OP_ADD/DEL/QUERY_FDIR_FILTER.
1298 *
1299 * VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE
1300 * OP_ADD_FDIR_FILTER request is failed due to no Hardware resource.
1301 *
1302 * VIRTCHNL_FDIR_FAILURE_RULE_EXIST
1303 * OP_ADD_FDIR_FILTER request is failed due to the rule is already existed.
1304 *
1305 * VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT
1306 * OP_ADD_FDIR_FILTER request is failed due to conflict with existing rule.
1307 *
1308 * VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST
1309 * OP_DEL_FDIR_FILTER request is failed due to this rule doesn't exist.
1310 *
1311 * VIRTCHNL_FDIR_FAILURE_RULE_INVALID
1312 * OP_ADD_FDIR_FILTER request is failed due to parameters validation
1313 * or HW doesn't support.
1314 *
1315 * VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT
1316 * OP_ADD/DEL_FDIR_FILTER request is failed due to timing out
1317 * for programming.
1318 *
1319 * VIRTCHNL_FDIR_FAILURE_QUERY_INVALID
1320 * OP_QUERY_FDIR_FILTER request is failed due to parameters validation,
1321 * for example, VF query counter of a rule who has no counter action.
1322 */
/* See the comment block above for the meaning of each status value.
 * Carried on the wire as an s32 in virtchnl_fdir_add/del.status.
 */
enum virtchnl_fdir_prgm_status {
	VIRTCHNL_FDIR_SUCCESS = 0,
	VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE,
	VIRTCHNL_FDIR_FAILURE_RULE_EXIST,
	VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT,
	VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST,
	VIRTCHNL_FDIR_FAILURE_RULE_INVALID,
	VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT,
	VIRTCHNL_FDIR_FAILURE_QUERY_INVALID,
};
1333
/* VIRTCHNL_OP_ADD_FDIR_FILTER
 * VF sends this request to PF by filling out vsi_id,
 * validate_only and rule_cfg. PF will return flow_id
 * if the request is successfully done and return add_status to VF.
 */
struct virtchnl_fdir_add {
	u16 vsi_id;  /* INPUT */
	/*
	 * 1 for validating a fdir rule, 0 for creating a fdir rule.
	 * Validate and create share one ops: VIRTCHNL_OP_ADD_FDIR_FILTER.
	 */
	u16 validate_only; /* INPUT */
	u32 flow_id;       /* OUTPUT; pass to VIRTCHNL_OP_DEL_FDIR_FILTER */
	struct virtchnl_fdir_rule rule_cfg; /* INPUT */

	/* see enum virtchnl_fdir_prgm_status; OUTPUT */
	s32 status;
};

VIRTCHNL_CHECK_STRUCT_LEN(2616, virtchnl_fdir_add);
1354
/* VIRTCHNL_OP_DEL_FDIR_FILTER
 * VF sends this request to PF by filling out vsi_id
 * and flow_id. PF will return del_status to VF.
 */
struct virtchnl_fdir_del {
	u16 vsi_id;  /* INPUT */
	u16 pad;     /* explicit padding so flow_id is 4-byte aligned */
	u32 flow_id; /* INPUT; returned by VIRTCHNL_OP_ADD_FDIR_FILTER */

	/* see enum virtchnl_fdir_prgm_status; OUTPUT */
	s32 status;
};

VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_fdir_del);
1369
1370 /**
1371 * virtchnl_vc_validate_vf_msg
1372 * @ver: Virtchnl version info
1373 * @v_opcode: Opcode for the message
1374 * @msg: pointer to the msg buffer
1375 * @msglen: msg length
1376 *
1377 * validate msg format against struct for each opcode
1378 */
1379 static inline int
virtchnl_vc_validate_vf_msg(struct virtchnl_version_info * ver,u32 v_opcode,u8 * msg,u16 msglen)1380 virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
1381 u8 *msg, u16 msglen)
1382 {
1383 bool err_msg_format = false;
1384 u32 valid_len = 0;
1385
1386 /* Validate message length. */
1387 switch (v_opcode) {
1388 case VIRTCHNL_OP_VERSION:
1389 valid_len = sizeof(struct virtchnl_version_info);
1390 break;
1391 case VIRTCHNL_OP_RESET_VF:
1392 break;
1393 case VIRTCHNL_OP_GET_VF_RESOURCES:
1394 if (VF_IS_V11(ver))
1395 valid_len = sizeof(u32);
1396 break;
1397 case VIRTCHNL_OP_CONFIG_TX_QUEUE:
1398 valid_len = sizeof(struct virtchnl_txq_info);
1399 break;
1400 case VIRTCHNL_OP_CONFIG_RX_QUEUE:
1401 valid_len = sizeof(struct virtchnl_rxq_info);
1402 break;
1403 case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
1404 valid_len = sizeof(struct virtchnl_vsi_queue_config_info);
1405 if (msglen >= valid_len) {
1406 struct virtchnl_vsi_queue_config_info *vqc =
1407 (struct virtchnl_vsi_queue_config_info *)msg;
1408 valid_len += (vqc->num_queue_pairs *
1409 sizeof(struct
1410 virtchnl_queue_pair_info));
1411 if (vqc->num_queue_pairs == 0)
1412 err_msg_format = true;
1413 }
1414 break;
1415 case VIRTCHNL_OP_CONFIG_IRQ_MAP:
1416 valid_len = sizeof(struct virtchnl_irq_map_info);
1417 if (msglen >= valid_len) {
1418 struct virtchnl_irq_map_info *vimi =
1419 (struct virtchnl_irq_map_info *)msg;
1420 valid_len += (vimi->num_vectors *
1421 sizeof(struct virtchnl_vector_map));
1422 if (vimi->num_vectors == 0)
1423 err_msg_format = true;
1424 }
1425 break;
1426 case VIRTCHNL_OP_ENABLE_QUEUES:
1427 case VIRTCHNL_OP_DISABLE_QUEUES:
1428 valid_len = sizeof(struct virtchnl_queue_select);
1429 break;
1430 case VIRTCHNL_OP_ADD_ETH_ADDR:
1431 case VIRTCHNL_OP_DEL_ETH_ADDR:
1432 valid_len = sizeof(struct virtchnl_ether_addr_list);
1433 if (msglen >= valid_len) {
1434 struct virtchnl_ether_addr_list *veal =
1435 (struct virtchnl_ether_addr_list *)msg;
1436 valid_len += veal->num_elements *
1437 sizeof(struct virtchnl_ether_addr);
1438 if (veal->num_elements == 0)
1439 err_msg_format = true;
1440 }
1441 break;
1442 case VIRTCHNL_OP_ADD_VLAN:
1443 case VIRTCHNL_OP_DEL_VLAN:
1444 valid_len = sizeof(struct virtchnl_vlan_filter_list);
1445 if (msglen >= valid_len) {
1446 struct virtchnl_vlan_filter_list *vfl =
1447 (struct virtchnl_vlan_filter_list *)msg;
1448 valid_len += vfl->num_elements * sizeof(u16);
1449 if (vfl->num_elements == 0)
1450 err_msg_format = true;
1451 }
1452 break;
1453 case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
1454 valid_len = sizeof(struct virtchnl_promisc_info);
1455 break;
1456 case VIRTCHNL_OP_GET_STATS:
1457 valid_len = sizeof(struct virtchnl_queue_select);
1458 break;
1459 case VIRTCHNL_OP_RDMA:
1460 /* These messages are opaque to us and will be validated in
1461 * the RDMA client code. We just need to check for nonzero
1462 * length. The firmware will enforce max length restrictions.
1463 */
1464 if (msglen)
1465 valid_len = msglen;
1466 else
1467 err_msg_format = true;
1468 break;
1469 case VIRTCHNL_OP_RELEASE_RDMA_IRQ_MAP:
1470 break;
1471 case VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP:
1472 valid_len = sizeof(struct virtchnl_rdma_qvlist_info);
1473 if (msglen >= valid_len) {
1474 struct virtchnl_rdma_qvlist_info *qv =
1475 (struct virtchnl_rdma_qvlist_info *)msg;
1476
1477 valid_len += ((qv->num_vectors - 1) *
1478 sizeof(struct virtchnl_rdma_qv_info));
1479 }
1480 break;
1481 case VIRTCHNL_OP_CONFIG_RSS_KEY:
1482 valid_len = sizeof(struct virtchnl_rss_key);
1483 if (msglen >= valid_len) {
1484 struct virtchnl_rss_key *vrk =
1485 (struct virtchnl_rss_key *)msg;
1486 valid_len += vrk->key_len - 1;
1487 }
1488 break;
1489 case VIRTCHNL_OP_CONFIG_RSS_LUT:
1490 valid_len = sizeof(struct virtchnl_rss_lut);
1491 if (msglen >= valid_len) {
1492 struct virtchnl_rss_lut *vrl =
1493 (struct virtchnl_rss_lut *)msg;
1494 valid_len += vrl->lut_entries - 1;
1495 }
1496 break;
1497 case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
1498 break;
1499 case VIRTCHNL_OP_SET_RSS_HENA:
1500 valid_len = sizeof(struct virtchnl_rss_hena);
1501 break;
1502 case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
1503 case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
1504 break;
1505 case VIRTCHNL_OP_REQUEST_QUEUES:
1506 valid_len = sizeof(struct virtchnl_vf_res_request);
1507 break;
1508 case VIRTCHNL_OP_ENABLE_CHANNELS:
1509 valid_len = sizeof(struct virtchnl_tc_info);
1510 if (msglen >= valid_len) {
1511 struct virtchnl_tc_info *vti =
1512 (struct virtchnl_tc_info *)msg;
1513 valid_len += (vti->num_tc - 1) *
1514 sizeof(struct virtchnl_channel_info);
1515 if (vti->num_tc == 0)
1516 err_msg_format = true;
1517 }
1518 break;
1519 case VIRTCHNL_OP_DISABLE_CHANNELS:
1520 break;
1521 case VIRTCHNL_OP_ADD_CLOUD_FILTER:
1522 case VIRTCHNL_OP_DEL_CLOUD_FILTER:
1523 valid_len = sizeof(struct virtchnl_filter);
1524 break;
1525 case VIRTCHNL_OP_GET_SUPPORTED_RXDIDS:
1526 break;
1527 case VIRTCHNL_OP_ADD_RSS_CFG:
1528 case VIRTCHNL_OP_DEL_RSS_CFG:
1529 valid_len = sizeof(struct virtchnl_rss_cfg);
1530 break;
1531 case VIRTCHNL_OP_ADD_FDIR_FILTER:
1532 valid_len = sizeof(struct virtchnl_fdir_add);
1533 break;
1534 case VIRTCHNL_OP_DEL_FDIR_FILTER:
1535 valid_len = sizeof(struct virtchnl_fdir_del);
1536 break;
1537 case VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS:
1538 break;
1539 case VIRTCHNL_OP_ADD_VLAN_V2:
1540 case VIRTCHNL_OP_DEL_VLAN_V2:
1541 valid_len = sizeof(struct virtchnl_vlan_filter_list_v2);
1542 if (msglen >= valid_len) {
1543 struct virtchnl_vlan_filter_list_v2 *vfl =
1544 (struct virtchnl_vlan_filter_list_v2 *)msg;
1545
1546 valid_len += (vfl->num_elements - 1) *
1547 sizeof(struct virtchnl_vlan_filter);
1548
1549 if (vfl->num_elements == 0) {
1550 err_msg_format = true;
1551 break;
1552 }
1553 }
1554 break;
1555 case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2:
1556 case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2:
1557 case VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2:
1558 case VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2:
1559 valid_len = sizeof(struct virtchnl_vlan_setting);
1560 break;
1561 /* These are always errors coming from the VF. */
1562 case VIRTCHNL_OP_EVENT:
1563 case VIRTCHNL_OP_UNKNOWN:
1564 default:
1565 return VIRTCHNL_STATUS_ERR_PARAM;
1566 }
1567 /* few more checks */
1568 if (err_msg_format || valid_len != msglen)
1569 return VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH;
1570
1571 return 0;
1572 }
1573 #endif /* _VIRTCHNL_H_ */
1574