// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

#include "ice_common.h"
#include "ice_vf_mbx.h"

/**
 * ice_aq_send_msg_to_vf
 * @hw: pointer to the hardware structure
 * @vfid: VF ID to send the message to
 * @v_opcode: opcode for VF-PF communication
 * @v_retval: return error code
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 * @cd: pointer to command details
 *
 * Send a message to the VF driver (0x0802) over the mailbox queue. The
 * message is sent asynchronously via ice_sq_send_cmd().
 */
int
ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval,
		      u8 *msg, u16 msglen, struct ice_sq_cd *cd)
{
	struct ice_aqc_pf_vf_msg *cmd;
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_mbx_opc_send_msg_to_vf);

	cmd = &desc.params.virt;
	cmd->id = cpu_to_le32(vfid);

	desc.cookie_high = cpu_to_le32(v_opcode);
	desc.cookie_low = cpu_to_le32(v_retval);

	if (msglen)
		desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	return ice_sq_send_cmd(hw, &hw->mailboxq, &desc, msg, msglen, cd);
}
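
/* Illustrative usage (a minimal sketch, not part of this file): the PF
 * virtchnl handling code typically replies to a VF request along these lines,
 * where "msg" and "msglen" stand for a hypothetical reply buffer and its
 * length:
 *
 *	err = ice_aq_send_msg_to_vf(&vf->pf->hw, vf->vf_id,
 *				    VIRTCHNL_OP_GET_VF_RESOURCES,
 *				    VIRTCHNL_STATUS_SUCCESS, msg, msglen, NULL);
 */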

static const u32 ice_legacy_aq_to_vc_speed[] = {
	VIRTCHNL_LINK_SPEED_100MB,	/* BIT(0) */
	VIRTCHNL_LINK_SPEED_100MB,
	VIRTCHNL_LINK_SPEED_1GB,
	VIRTCHNL_LINK_SPEED_1GB,
	VIRTCHNL_LINK_SPEED_1GB,
	VIRTCHNL_LINK_SPEED_10GB,
	VIRTCHNL_LINK_SPEED_20GB,
	VIRTCHNL_LINK_SPEED_25GB,
	VIRTCHNL_LINK_SPEED_40GB,
	VIRTCHNL_LINK_SPEED_40GB,
	VIRTCHNL_LINK_SPEED_40GB,
};
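
/* Example mapping (a sketch of how the table above is used, assuming the
 * admin queue reports 50 Gbps as BIT(9) of the link speed field):
 * fls(BIT(9)) - 1 yields index 9, which resolves to VIRTCHNL_LINK_SPEED_40GB,
 * the closest speed defined for older AVF drivers.
 */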

/**
 * ice_conv_link_speed_to_virtchnl
 * @adv_link_support: determines the format of the returned link speed
 * @link_speed: variable containing the link_speed to be converted
 *
 * Convert link speed supported by HW to link speed supported by virtchnl.
 * If adv_link_support is true, then return link speed in Mbps. Else return
 * link speed as a VIRTCHNL_LINK_SPEED_* cast to a u32. Note that the caller
 * needs to cast back to an enum virtchnl_link_speed in the case where
 * adv_link_support is false, but when adv_link_support is true the caller can
 * expect the speed in Mbps.
 */
u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed)
{
	/* convert a BIT() value into an array index */
	u32 index = fls(link_speed) - 1;

	if (adv_link_support)
		return ice_get_link_speed(index);
	else if (index < ARRAY_SIZE(ice_legacy_aq_to_vc_speed))
		/* Virtchnl speeds are not defined for every speed supported by
		 * the hardware. To maintain compatibility with older AVF
		 * drivers, newer speed values are resolved to the closest
		 * known virtchnl speed when the speed is reported.
		 */
		return ice_legacy_aq_to_vc_speed[index];

	return VIRTCHNL_LINK_SPEED_UNKNOWN;
}

/* The mailbox overflow detection algorithm helps to check if there
 * is a possibility of a malicious VF transmitting too many MBX messages to the
 * PF.
 * 1. The mailbox snapshot structure, ice_mbx_snapshot, is initialized during
 * driver initialization in ice_init_hw() using ice_mbx_init_snapshot().
 * The struct ice_mbx_snapshot helps to track and traverse a static window of
 * messages within the mailbox queue while looking for a malicious VF.
 *
 * 2. When the caller starts processing its mailbox queue in response to an
 * interrupt, the structure ice_mbx_snapshot is expected to be cleared before
 * the algorithm can be run for the first time for that interrupt. This can be
 * done via ice_mbx_reset_snapshot().
 *
 * 3. For every message read by the caller from the MBX Queue, the caller must
 * call the detection algorithm's entry function ice_mbx_vf_state_handler().
 * Before every call to ice_mbx_vf_state_handler() the struct ice_mbx_data is
 * filled as it is required to be passed to the algorithm.
 *
 * 4. Every time a message is read from the MBX queue, a VFId is received which
 * is passed to the state handler. The boolean output is_malvf of the state
 * handler ice_mbx_vf_state_handler() serves as an indicator to the caller
 * whether this VF is malicious or not.
 *
 * 5. When a VF is identified to be malicious, the caller can send a message
 * to the system administrator. The caller can invoke ice_mbx_report_malvf()
 * to help determine if a malicious VF is to be reported or not. This function
 * requires the caller to maintain a global bitmap to track all malicious VFs
 * and pass that to ice_mbx_report_malvf() along with the VFID which was
 * identified to be malicious by ice_mbx_vf_state_handler().
 *
 * 6. The global bitmap maintained by PF can be cleared completely if PF is in
 * reset or the bit corresponding to a VF can be cleared if that VF is in reset.
 * When a VF is shut down and brought back up, we assume that the new VF is
 * not malicious and hence report it again if it is found to be malicious.
 *
 * 7. The function ice_mbx_reset_snapshot() is called to reset the information
 * in ice_mbx_snapshot for every new mailbox interrupt handled.
 *
 * 8. The memory allocated for variables in ice_mbx_snapshot is de-allocated
 * when the driver is unloaded.
 *
 * An illustrative caller-side sketch of steps 3-5 follows this comment.
 */
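/* A minimal caller sketch of the per-message flow described above. It is not
 * part of this file; the surrounding receive loop, the "vf_id" taken from each
 * received descriptor, and the "malvfs" bitmap owned by the caller are all
 * hypothetical names used only for illustration:
 *
 *	struct ice_mbx_data mbx_data = {
 *		.num_msg_proc = num_processed,
 *		.num_pending_arq = pending,
 *		.max_num_msgs_mbx = hw->mailboxq.num_rq_entries,
 *		.async_watermark_val = watermark,
 *	};
 *	bool is_malvf = false, report;
 *
 *	if (!ice_mbx_vf_state_handler(hw, &mbx_data, vf_id, &is_malvf) &&
 *	    is_malvf &&
 *	    !ice_mbx_report_malvf(hw, malvfs, bitmap_len, vf_id, &report) &&
 *	    report)
 *		dev_warn(ice_hw_to_dev(hw),
 *			 "Malicious VF %u detected on mailbox\n", vf_id);
 */
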
#define ICE_RQ_DATA_MASK(rq_data) ((rq_data) & PF_MBX_ARQH_ARQH_M)
/* The highest unsigned 16-bit value, 0xFFFF, indicates that the max messages
 * check must be ignored in the algorithm.
 */
#define ICE_IGNORE_MAX_MSG_CNT	0xFFFF

/**
 * ice_mbx_traverse - Pass through mailbox snapshot
 * @hw: pointer to the HW struct
 * @new_state: new algorithm state
 *
 * Traverse the mailbox static snapshot without checking
 * for malicious VFs.
 */
static void
ice_mbx_traverse(struct ice_hw *hw,
		 enum ice_mbx_snapshot_state *new_state)
{
	struct ice_mbx_snap_buffer_data *snap_buf;
	u32 num_iterations;

	snap_buf = &hw->mbx_snapshot.mbx_buf;

	/* As the mailbox buffer is circular, apply a mask to the
	 * incremented iteration count.
	 */
	num_iterations = ICE_RQ_DATA_MASK(++snap_buf->num_iterations);

	/* Check either of the below conditions to exit snapshot traversal:
	 * Condition-1: If the number of iterations in the mailbox is equal to
	 * the mailbox head, which indicates that we have reached the end of
	 * the static snapshot.
	 * Condition-2: If the maximum messages serviced in the mailbox for a
	 * given interrupt is the highest possible value, then there is no need
	 * to check if the number of messages processed is equal to it. If not,
	 * check if the number of messages processed is greater than or equal
	 * to the maximum number of mailbox entries serviced in the current
	 * work item.
	 */
	if (num_iterations == snap_buf->head ||
	    (snap_buf->max_num_msgs_mbx < ICE_IGNORE_MAX_MSG_CNT &&
	     ++snap_buf->num_msg_proc >= snap_buf->max_num_msgs_mbx))
		*new_state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT;
}

/**
 * ice_mbx_detect_malvf - Detect malicious VF in snapshot
 * @hw: pointer to the HW struct
 * @vf_id: relative virtual function ID
 * @new_state: new algorithm state
 * @is_malvf: boolean output to indicate if VF is malicious
 *
 * This function tracks the number of asynchronous messages
 * sent per VF and marks the VF as malicious if it exceeds
 * the permissible number of messages to send.
 */
static int
ice_mbx_detect_malvf(struct ice_hw *hw, u16 vf_id,
		     enum ice_mbx_snapshot_state *new_state,
		     bool *is_malvf)
{
	struct ice_mbx_snapshot *snap = &hw->mbx_snapshot;

	if (vf_id >= snap->mbx_vf.vfcntr_len)
		return -EIO;

	/* increment the message count in the VF array */
	snap->mbx_vf.vf_cntr[vf_id]++;

	if (snap->mbx_vf.vf_cntr[vf_id] >= ICE_ASYNC_VF_MSG_THRESHOLD)
		*is_malvf = true;

	/* continue to iterate through the mailbox snapshot */
	ice_mbx_traverse(hw, new_state);

	return 0;
}

/**
 * ice_mbx_reset_snapshot - Reset mailbox snapshot structure
 * @snap: pointer to mailbox snapshot structure in the ice_hw struct
 *
 * Reset the mailbox snapshot structure and clear VF counter array.
 */
static void ice_mbx_reset_snapshot(struct ice_mbx_snapshot *snap)
{
	u32 vfcntr_len;

	if (!snap || !snap->mbx_vf.vf_cntr)
		return;

	/* Clear VF counters. */
	vfcntr_len = snap->mbx_vf.vfcntr_len;
	if (vfcntr_len)
		memset(snap->mbx_vf.vf_cntr, 0,
		       (vfcntr_len * sizeof(*snap->mbx_vf.vf_cntr)));

	/* Reset mailbox snapshot for a new capture. */
	memset(&snap->mbx_buf, 0, sizeof(snap->mbx_buf));
	snap->mbx_buf.state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT;
}

/**
 * ice_mbx_vf_state_handler - Handle states of the overflow algorithm
 * @hw: pointer to the HW struct
 * @mbx_data: pointer to structure containing mailbox data
 * @vf_id: relative virtual function (VF) ID
 * @is_malvf: boolean output to indicate if VF is malicious
 *
 * The function serves as an entry point for the malicious VF
 * detection algorithm by handling the different states and state
 * transitions of the algorithm:
 * New snapshot: This state is entered when creating a new static
 * snapshot. The data from any previous mailbox snapshot is
 * cleared and a new capture of the mailbox head and tail is
 * logged. This will be the new static snapshot to detect
 * asynchronous messages sent by VFs. On capturing the snapshot,
 * and depending on whether the number of pending messages in that
 * snapshot exceeds the watermark value, the state machine enters
 * the traverse or detect state.
 * Traverse: If the pending message count is below the watermark, iterate
 * through the snapshot without any action on the VF.
 * Detect: If the pending message count exceeds the watermark, traverse
 * the static snapshot and look for a malicious VF.
 */
int
ice_mbx_vf_state_handler(struct ice_hw *hw,
			 struct ice_mbx_data *mbx_data, u16 vf_id,
			 bool *is_malvf)
{
	struct ice_mbx_snapshot *snap = &hw->mbx_snapshot;
	struct ice_mbx_snap_buffer_data *snap_buf;
	struct ice_ctl_q_info *cq = &hw->mailboxq;
	enum ice_mbx_snapshot_state new_state;
	int status = 0;

	if (!is_malvf || !mbx_data)
		return -EINVAL;

	/* When entering the mailbox state machine assume that the VF
	 * is not malicious until detected.
	 */
	*is_malvf = false;

	/* The maximum number of messages that can be processed while servicing
	 * the current interrupt must be greater than the defined AVF message
	 * threshold.
	 */
	if (mbx_data->max_num_msgs_mbx <= ICE_ASYNC_VF_MSG_THRESHOLD)
		return -EINVAL;

	/* The watermark value must not be less than the threshold limit
	 * set for the number of asynchronous messages a VF can send to the
	 * mailbox, nor greater than the maximum number of messages in the
	 * mailbox serviced in the current interrupt.
	 */
	if (mbx_data->async_watermark_val < ICE_ASYNC_VF_MSG_THRESHOLD ||
	    mbx_data->async_watermark_val > mbx_data->max_num_msgs_mbx)
		return -EINVAL;

	new_state = ICE_MAL_VF_DETECT_STATE_INVALID;
	snap_buf = &snap->mbx_buf;

	switch (snap_buf->state) {
	case ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT:
		/* Clear any previously held data in the mailbox snapshot
		 * structure.
		 */
		ice_mbx_reset_snapshot(snap);

		/* Collect the pending ARQ count, the number of messages
		 * processed and the maximum number of messages allowed to be
		 * processed from the mailbox for the current interrupt.
		 */
		snap_buf->num_pending_arq = mbx_data->num_pending_arq;
		snap_buf->num_msg_proc = mbx_data->num_msg_proc;
		snap_buf->max_num_msgs_mbx = mbx_data->max_num_msgs_mbx;

		/* Capture a new static snapshot of the mailbox by logging the
		 * head and tail of the snapshot, and set num_iterations to the
		 * tail value to mark the start of the iteration through the
		 * snapshot.
		 */
		snap_buf->head = ICE_RQ_DATA_MASK(cq->rq.next_to_clean +
						  mbx_data->num_pending_arq);
		snap_buf->tail = ICE_RQ_DATA_MASK(cq->rq.next_to_clean - 1);
		snap_buf->num_iterations = snap_buf->tail;

		/* The pending ARQ message count returned by ice_clean_rq_elem
		 * is the difference between the head and tail of the
		 * mailbox queue. Comparing this value against the watermark
		 * helps to check if we potentially have malicious VFs.
		 */
		if (snap_buf->num_pending_arq >=
		    mbx_data->async_watermark_val) {
			new_state = ICE_MAL_VF_DETECT_STATE_DETECT;
			status = ice_mbx_detect_malvf(hw, vf_id, &new_state, is_malvf);
		} else {
			new_state = ICE_MAL_VF_DETECT_STATE_TRAVERSE;
			ice_mbx_traverse(hw, &new_state);
		}
		break;

	case ICE_MAL_VF_DETECT_STATE_TRAVERSE:
		new_state = ICE_MAL_VF_DETECT_STATE_TRAVERSE;
		ice_mbx_traverse(hw, &new_state);
		break;

	case ICE_MAL_VF_DETECT_STATE_DETECT:
		new_state = ICE_MAL_VF_DETECT_STATE_DETECT;
		status = ice_mbx_detect_malvf(hw, vf_id, &new_state, is_malvf);
		break;

	default:
		new_state = ICE_MAL_VF_DETECT_STATE_INVALID;
		status = -EIO;
	}

	snap_buf->state = new_state;

	return status;
}
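
/* Summary of the state transitions driven by ice_mbx_vf_state_handler(), as a
 * quick reference derived from the handler above:
 *
 *	NEW_SNAPSHOT -- pending ARQ >= watermark --> DETECT
 *	NEW_SNAPSHOT -- pending ARQ <  watermark --> TRAVERSE
 *	TRAVERSE / DETECT -- end of snapshot or message budget reached
 *			     (see ice_mbx_traverse()) --> NEW_SNAPSHOT
 *	any other state --> INVALID, and -EIO is returned
 */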

/**
 * ice_mbx_report_malvf - Track and note malicious VF
 * @hw: pointer to the HW struct
 * @all_malvfs: all malicious VFs tracked by PF
 * @bitmap_len: length of bitmap in bits
 * @vf_id: relative virtual function ID of the malicious VF
 * @report_malvf: boolean to indicate if malicious VF must be reported
 *
 * This function will update a bitmap that keeps track of the malicious
 * VFs attached to the PF. A malicious VF must be reported only once if
 * discovered between VF resets or loading, so the function checks
 * the input vf_id against the bitmap to verify if the VF has been
 * detected in any previous mailbox iterations.
 */
int
ice_mbx_report_malvf(struct ice_hw *hw, unsigned long *all_malvfs,
		     u16 bitmap_len, u16 vf_id, bool *report_malvf)
{
	if (!all_malvfs || !report_malvf)
		return -EINVAL;

	*report_malvf = false;

	if (bitmap_len < hw->mbx_snapshot.mbx_vf.vfcntr_len)
		return -EINVAL;

	if (vf_id >= bitmap_len)
		return -EIO;

	/* If the VF is not already marked in the bitmap, set its bit and
	 * report it.
	 */
	if (!test_and_set_bit(vf_id, all_malvfs))
		*report_malvf = true;

	return 0;
}

/**
 * ice_mbx_clear_malvf - Clear VF bitmap and counter for VF ID
 * @snap: pointer to the mailbox snapshot structure
 * @all_malvfs: all malicious VFs tracked by PF
 * @bitmap_len: length of bitmap in bits
 * @vf_id: relative virtual function ID of the malicious VF
 *
 * In case of a VF reset, this function can be called to clear
 * the bit corresponding to the VF ID in the bitmap tracking all
 * malicious VFs attached to the PF. The function also clears the
 * VF counter array at the index of the VF ID. This is to ensure
 * that the new VF loaded is not considered malicious before going
 * through the overflow detection algorithm.
 */
int
ice_mbx_clear_malvf(struct ice_mbx_snapshot *snap, unsigned long *all_malvfs,
		    u16 bitmap_len, u16 vf_id)
{
	if (!snap || !all_malvfs)
		return -EINVAL;

	if (bitmap_len < snap->mbx_vf.vfcntr_len)
		return -EINVAL;

	/* Ensure VF ID value is not larger than bitmap or VF counter length */
	if (vf_id >= bitmap_len || vf_id >= snap->mbx_vf.vfcntr_len)
		return -EIO;

	/* Clear VF ID bit in the bitmap tracking malicious VFs attached to PF */
	clear_bit(vf_id, all_malvfs);

	/* Clear the VF counter in the mailbox snapshot structure for that VF ID.
	 * This is to ensure that if a VF is unloaded and a new one brought back
	 * up with the same VF ID for a snapshot currently in traversal or detect
	 * state, the counter for that VF ID does not increment on top of existing
	 * values in the mailbox overflow detection algorithm.
	 */
	snap->mbx_vf.vf_cntr[vf_id] = 0;

	return 0;
}

/**
 * ice_mbx_init_snapshot - Initialize mailbox snapshot structure
 * @hw: pointer to the hardware structure
 * @vf_count: number of VFs allocated on a PF
 *
 * Clear the mailbox snapshot structure and allocate memory
 * for the VF counter array based on the number of VFs allocated
 * on that PF.
 *
 * Assumption: This function will assume ice_get_caps() has already been
 * called to ensure that the vf_count can be compared against the number
 * of VFs supported as defined in the functional capabilities of the device.
 */
int ice_mbx_init_snapshot(struct ice_hw *hw, u16 vf_count)
{
	struct ice_mbx_snapshot *snap = &hw->mbx_snapshot;

	/* Ensure that the number of VFs allocated is non-zero and
	 * is not greater than the number of supported VFs defined in
	 * the functional capabilities of the PF.
	 */
	if (!vf_count || vf_count > hw->func_caps.num_allocd_vfs)
		return -EINVAL;

	snap->mbx_vf.vf_cntr = devm_kcalloc(ice_hw_to_dev(hw), vf_count,
					    sizeof(*snap->mbx_vf.vf_cntr),
					    GFP_KERNEL);
	if (!snap->mbx_vf.vf_cntr)
		return -ENOMEM;

	/* Set the VF counter length to the number of VFs allocated
	 * per the given PF's functional capabilities.
	 */
	snap->mbx_vf.vfcntr_len = vf_count;

	/* Clear mbx_buf in the mailbox snapshot structure and set the
	 * mailbox snapshot state to a new capture.
	 */
	memset(&snap->mbx_buf, 0, sizeof(snap->mbx_buf));
	snap->mbx_buf.state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT;

	return 0;
}

/**
 * ice_mbx_deinit_snapshot - Free mailbox snapshot structure
 * @hw: pointer to the hardware structure
 *
 * Clear the mailbox snapshot structure and free the VF counter array.
 */
void ice_mbx_deinit_snapshot(struct ice_hw *hw)
{
	struct ice_mbx_snapshot *snap = &hw->mbx_snapshot;

	/* Free VF counter array and reset VF counter length */
	devm_kfree(ice_hw_to_dev(hw), snap->mbx_vf.vf_cntr);
	snap->mbx_vf.vfcntr_len = 0;

	/* Clear mbx_buf in the mailbox snapshot structure */
	memset(&snap->mbx_buf, 0, sizeof(snap->mbx_buf));
}