// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2013-2021, Mellanox Technologies inc.  All rights reserved.
 */

#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/eq.h>
#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#endif
#include "mlx5_core.h"
#include "lib/eq.h"
#include "fpga/core.h"
#include "eswitch.h"
#include "lib/clock.h"
#include "diag/fw_tracer.h"
#include "mlx5_irq.h"
#include "devlink.h"
#include "en_accel/ipsec.h"

enum {
	MLX5_EQE_OWNER_INIT_VAL	= 0x1,
};

enum {
	MLX5_EQ_STATE_ARMED		= 0x9,
	MLX5_EQ_STATE_FIRED		= 0xa,
	MLX5_EQ_STATE_ALWAYS_ARMED	= 0xb,
};

enum {
	MLX5_EQ_DOORBEL_OFFSET	= 0x40,
};

/* budget must be smaller than MLX5_NUM_SPARE_EQE to guarantee that we update
 * the ci before we have polled all the entries in the EQ. MLX5_NUM_SPARE_EQE
 * is also used when sizing the EQ, so the budget is necessarily smaller than
 * the EQ size.
 */
enum {
	MLX5_EQ_POLLING_BUDGET	= 128,
};

static_assert(MLX5_EQ_POLLING_BUDGET <= MLX5_NUM_SPARE_EQE);
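
/* MLX5_NUM_SPARE_EQE comes from lib/eq.h (0x80, i.e. 128 entries, at the time
 * of writing). The assertion above therefore pins the polling budget to at
 * most the number of spare EQEs, so a handler can consume a full budget and
 * still have the ci updated before every entry in the EQ has been polled.
 */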

struct mlx5_eq_table {
	struct list_head        comp_eqs_list;
	struct mlx5_eq_async    pages_eq;
	struct mlx5_eq_async    cmd_eq;
	struct mlx5_eq_async    async_eq;

	struct atomic_notifier_head nh[MLX5_EVENT_TYPE_MAX];

	/* Since CQ DB is stored in async_eq */
	struct mlx5_nb          cq_err_nb;

	struct mutex            lock; /* sync async eqs creations */
	int			num_comp_eqs;
	struct mlx5_irq_table	*irq_table;
	struct mlx5_irq         **comp_irqs;
	struct mlx5_irq         *ctrl_irq;
#ifdef CONFIG_RFS_ACCEL
	struct cpu_rmap		*rmap;
#endif
};

#define MLX5_ASYNC_EVENT_MASK ((1ull << MLX5_EVENT_TYPE_PATH_MIG)	    | \
			       (1ull << MLX5_EVENT_TYPE_COMM_EST)	    | \
			       (1ull << MLX5_EVENT_TYPE_SQ_DRAINED)	    | \
			       (1ull << MLX5_EVENT_TYPE_CQ_ERROR)	    | \
			       (1ull << MLX5_EVENT_TYPE_WQ_CATAS_ERROR)	    | \
			       (1ull << MLX5_EVENT_TYPE_PATH_MIG_FAILED)    | \
			       (1ull << MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
			       (1ull << MLX5_EVENT_TYPE_WQ_ACCESS_ERROR)    | \
			       (1ull << MLX5_EVENT_TYPE_PORT_CHANGE)	    | \
			       (1ull << MLX5_EVENT_TYPE_SRQ_CATAS_ERROR)    | \
			       (1ull << MLX5_EVENT_TYPE_SRQ_LAST_WQE)	    | \
			       (1ull << MLX5_EVENT_TYPE_SRQ_RQ_LIMIT))

static int mlx5_cmd_destroy_eq(struct mlx5_core_dev *dev, u8 eqn)
{
	u32 in[MLX5_ST_SZ_DW(destroy_eq_in)] = {};

	MLX5_SET(destroy_eq_in, in, opcode, MLX5_CMD_OP_DESTROY_EQ);
	MLX5_SET(destroy_eq_in, in, eq_number, eqn);
	return mlx5_cmd_exec_in(dev, destroy_eq, in);
}

/* caller must eventually call mlx5_cq_put on the returned cq */
static struct mlx5_core_cq *mlx5_eq_cq_get(struct mlx5_eq *eq, u32 cqn)
{
	struct mlx5_cq_table *table = &eq->cq_table;
	struct mlx5_core_cq *cq = NULL;

	rcu_read_lock();
	cq = radix_tree_lookup(&table->tree, cqn);
	if (likely(cq))
		mlx5_cq_hold(cq);
	rcu_read_unlock();

	return cq;
}

static int mlx5_eq_comp_int(struct notifier_block *nb,
			    __always_unused unsigned long action,
			    __always_unused void *data)
{
	struct mlx5_eq_comp *eq_comp =
		container_of(nb, struct mlx5_eq_comp, irq_nb);
	struct mlx5_eq *eq = &eq_comp->core;
	struct mlx5_eqe *eqe;
	int num_eqes = 0;
	u32 cqn = -1;

	eqe = next_eqe_sw(eq);
	if (!eqe)
		goto out;

	do {
		struct mlx5_core_cq *cq;

		/* Make sure we read EQ entry contents after we've
		 * checked the ownership bit.
		 */
		dma_rmb();
		/* Assume (eqe->type) is always MLX5_EVENT_TYPE_COMP */
		cqn = be32_to_cpu(eqe->data.comp.cqn) & 0xffffff;

		cq = mlx5_eq_cq_get(eq, cqn);
		if (likely(cq)) {
			++cq->arm_sn;
			cq->comp(cq, eqe);
			mlx5_cq_put(cq);
		} else {
			dev_dbg_ratelimited(eq->dev->device,
					    "Completion event for bogus CQ 0x%x\n", cqn);
		}

		++eq->cons_index;

	} while ((++num_eqes < MLX5_EQ_POLLING_BUDGET) && (eqe = next_eqe_sw(eq)));

out:
	eq_update_ci(eq, 1);

	if (cqn != -1)
		tasklet_schedule(&eq_comp->tasklet_ctx.task);

	return 0;
}

/* Some architectures don't latch interrupts when they are disabled, so using
 * mlx5_eq_poll_irq_disabled could end up losing interrupts while trying to
 * avoid losing them.  It is not recommended to use it, unless this is the last
 * resort.
 */
u32 mlx5_eq_poll_irq_disabled(struct mlx5_eq_comp *eq)
{
	u32 count_eqe;

	disable_irq(eq->core.irqn);
	count_eqe = eq->core.cons_index;
	mlx5_eq_comp_int(&eq->irq_nb, 0, NULL);
	count_eqe = eq->core.cons_index - count_eqe;
	enable_irq(eq->core.irqn);

	return count_eqe;
}

static void mlx5_eq_async_int_lock(struct mlx5_eq_async *eq, bool recovery,
				   unsigned long *flags)
	__acquires(&eq->lock)
{
	if (!recovery)
		spin_lock(&eq->lock);
	else
		spin_lock_irqsave(&eq->lock, *flags);
}

static void mlx5_eq_async_int_unlock(struct mlx5_eq_async *eq, bool recovery,
				     unsigned long *flags)
	__releases(&eq->lock)
{
	if (!recovery)
		spin_unlock(&eq->lock);
	else
		spin_unlock_irqrestore(&eq->lock, *flags);
}

enum async_eq_nb_action {
	ASYNC_EQ_IRQ_HANDLER = 0,
	ASYNC_EQ_RECOVER = 1,
};

static int mlx5_eq_async_int(struct notifier_block *nb,
			     unsigned long action, void *data)
{
	struct mlx5_eq_async *eq_async =
		container_of(nb, struct mlx5_eq_async, irq_nb);
	struct mlx5_eq *eq = &eq_async->core;
	struct mlx5_eq_table *eqt;
	struct mlx5_core_dev *dev;
	struct mlx5_eqe *eqe;
	unsigned long flags;
	int num_eqes = 0;
	bool recovery;

	dev = eq->dev;
	eqt = dev->priv.eq_table;

	recovery = action == ASYNC_EQ_RECOVER;
	mlx5_eq_async_int_lock(eq_async, recovery, &flags);

	eqe = next_eqe_sw(eq);
	if (!eqe)
		goto out;

	do {
		/*
		 * Make sure we read EQ entry contents after we've
		 * checked the ownership bit.
		 */
		dma_rmb();

		atomic_notifier_call_chain(&eqt->nh[eqe->type], eqe->type, eqe);
		atomic_notifier_call_chain(&eqt->nh[MLX5_EVENT_TYPE_NOTIFY_ANY], eqe->type, eqe);

		++eq->cons_index;

	} while ((++num_eqes < MLX5_EQ_POLLING_BUDGET) && (eqe = next_eqe_sw(eq)));

out:
	eq_update_ci(eq, 1);
	mlx5_eq_async_int_unlock(eq_async, recovery, &flags);

	return unlikely(recovery) ? num_eqes : 0;
}

void mlx5_cmd_eq_recover(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_async *eq = &dev->priv.eq_table->cmd_eq;
	int eqes;

	eqes = mlx5_eq_async_int(&eq->irq_nb, ASYNC_EQ_RECOVER, NULL);
	if (eqes)
		mlx5_core_warn(dev, "Recovered %d EQEs on cmd_eq\n", eqes);
}

static void init_eq_buf(struct mlx5_eq *eq)
{
	struct mlx5_eqe *eqe;
	int i;

	for (i = 0; i < eq_get_size(eq); i++) {
		eqe = get_eqe(eq, i);
		eqe->owner = MLX5_EQE_OWNER_INIT_VAL;
	}
}

static int
create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
	      struct mlx5_eq_param *param)
{
	u8 log_eq_size = order_base_2(param->nent + MLX5_NUM_SPARE_EQE);
	struct mlx5_cq_table *cq_table = &eq->cq_table;
	u32 out[MLX5_ST_SZ_DW(create_eq_out)] = {0};
	u8 log_eq_stride = ilog2(MLX5_EQE_SIZE);
	struct mlx5_priv *priv = &dev->priv;
	__be64 *pas;
	u16 vecidx;
	void *eqc;
	int inlen;
	u32 *in;
	int err;
	int i;

	/* Init CQ table */
	memset(cq_table, 0, sizeof(*cq_table));
	spin_lock_init(&cq_table->lock);
	INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC);

	eq->cons_index = 0;

	err = mlx5_frag_buf_alloc_node(dev, wq_get_byte_sz(log_eq_size, log_eq_stride),
				       &eq->frag_buf, dev->priv.numa_node);
	if (err)
		return err;

	mlx5_init_fbc(eq->frag_buf.frags, log_eq_stride, log_eq_size, &eq->fbc);
	init_eq_buf(eq);

	eq->irq = param->irq;
	vecidx = mlx5_irq_get_index(eq->irq);

	inlen = MLX5_ST_SZ_BYTES(create_eq_in) +
		MLX5_FLD_SZ_BYTES(create_eq_in, pas[0]) * eq->frag_buf.npages;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_buf;
	}

	pas = (__be64 *)MLX5_ADDR_OF(create_eq_in, in, pas);
	mlx5_fill_page_frag_array(&eq->frag_buf, pas);

	MLX5_SET(create_eq_in, in, opcode, MLX5_CMD_OP_CREATE_EQ);
	if (!param->mask[0] && MLX5_CAP_GEN(dev, log_max_uctx))
		MLX5_SET(create_eq_in, in, uid, MLX5_SHARED_RESOURCE_UID);

	for (i = 0; i < 4; i++)
		MLX5_ARRAY_SET64(create_eq_in, in, event_bitmask, i,
				 param->mask[i]);

	eqc = MLX5_ADDR_OF(create_eq_in, in, eq_context_entry);
	MLX5_SET(eqc, eqc, log_eq_size, eq->fbc.log_sz);
	MLX5_SET(eqc, eqc, uar_page, priv->uar->index);
	MLX5_SET(eqc, eqc, intr, vecidx);
	MLX5_SET(eqc, eqc, log_page_size,
		 eq->frag_buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);

	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
	if (err)
		goto err_in;

	eq->vecidx = vecidx;
	eq->eqn = MLX5_GET(create_eq_out, out, eq_number);
	eq->irqn = pci_irq_vector(dev->pdev, vecidx);
	eq->dev = dev;
	eq->doorbell = priv->uar->map + MLX5_EQ_DOORBEL_OFFSET;

	err = mlx5_debug_eq_add(dev, eq);
	if (err)
		goto err_eq;

	kvfree(in);
	return 0;

err_eq:
	mlx5_cmd_destroy_eq(dev, eq->eqn);

err_in:
	kvfree(in);

err_buf:
	mlx5_frag_buf_free(dev, &eq->frag_buf);
	return err;
}

/**
 * mlx5_eq_enable - Enable EQ for receiving EQEs
 * @dev : Device which owns the eq
 * @eq  : EQ to enable
 * @nb  : Notifier call block
 *
 * Must be called after EQ is created in device.
 *
 * @return: 0 if no error
 */
int mlx5_eq_enable(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
		   struct notifier_block *nb)
{
	int err;

	err = mlx5_irq_attach_nb(eq->irq, nb);
	if (!err)
		eq_update_ci(eq, 1);

	return err;
}
EXPORT_SYMBOL(mlx5_eq_enable);

/**
 * mlx5_eq_disable - Disable EQ for receiving EQEs
 * @dev : Device which owns the eq
 * @eq  : EQ to disable
 * @nb  : Notifier call block
 *
 * Must be called before EQ is destroyed.
 */
void mlx5_eq_disable(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
		     struct notifier_block *nb)
{
	mlx5_irq_detach_nb(eq->irq, nb);
}
EXPORT_SYMBOL(mlx5_eq_disable);
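
/* Consumers are expected to pair these two calls around the EQ's lifetime:
 * mlx5_eq_enable() right after the EQ has been created in the device and
 * mlx5_eq_disable() before it is destroyed, passing the same notifier block
 * to both. See the usage sketch after the generic EQ API below.
 */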

static int destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
	int err;

	mlx5_debug_eq_remove(dev, eq);

	err = mlx5_cmd_destroy_eq(dev, eq->eqn);
	if (err)
		mlx5_core_warn(dev, "failed to destroy a previously created eq: eqn %d\n",
			       eq->eqn);

	mlx5_frag_buf_free(dev, &eq->frag_buf);
	return err;
}

int mlx5_eq_add_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq)
{
	struct mlx5_cq_table *table = &eq->cq_table;
	int err;

	spin_lock(&table->lock);
	err = radix_tree_insert(&table->tree, cq->cqn, cq);
	spin_unlock(&table->lock);

	return err;
}

void mlx5_eq_del_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq)
{
	struct mlx5_cq_table *table = &eq->cq_table;
	struct mlx5_core_cq *tmp;

	spin_lock(&table->lock);
	tmp = radix_tree_delete(&table->tree, cq->cqn);
	spin_unlock(&table->lock);

	if (!tmp) {
		mlx5_core_dbg(eq->dev, "cq 0x%x not found in eq 0x%x tree\n",
			      cq->cqn, eq->eqn);
		return;
	}

	if (tmp != cq)
		mlx5_core_dbg(eq->dev, "corruption on cqn 0x%x in eq 0x%x\n",
			      cq->cqn, eq->eqn);
}

int mlx5_eq_table_init(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *eq_table;
	int i;

	eq_table = kvzalloc_node(sizeof(*eq_table), GFP_KERNEL,
				 dev->priv.numa_node);
	if (!eq_table)
		return -ENOMEM;

	dev->priv.eq_table = eq_table;

	mlx5_eq_debugfs_init(dev);

	mutex_init(&eq_table->lock);
	for (i = 0; i < MLX5_EVENT_TYPE_MAX; i++)
		ATOMIC_INIT_NOTIFIER_HEAD(&eq_table->nh[i]);

	eq_table->irq_table = mlx5_irq_table_get(dev);
	return 0;
}

void mlx5_eq_table_cleanup(struct mlx5_core_dev *dev)
{
	mlx5_eq_debugfs_cleanup(dev);
	kvfree(dev->priv.eq_table);
}

/* Async EQs */

static int create_async_eq(struct mlx5_core_dev *dev,
			   struct mlx5_eq *eq, struct mlx5_eq_param *param)
{
	struct mlx5_eq_table *eq_table = dev->priv.eq_table;
	int err;

	mutex_lock(&eq_table->lock);
	err = create_map_eq(dev, eq, param);
	mutex_unlock(&eq_table->lock);
	return err;
}

static int destroy_async_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
	struct mlx5_eq_table *eq_table = dev->priv.eq_table;
	int err;

	mutex_lock(&eq_table->lock);
	err = destroy_unmap_eq(dev, eq);
	mutex_unlock(&eq_table->lock);
	return err;
}

static int cq_err_event_notifier(struct notifier_block *nb,
				 unsigned long type, void *data)
{
	struct mlx5_eq_table *eqt;
	struct mlx5_core_cq *cq;
	struct mlx5_eqe *eqe;
	struct mlx5_eq *eq;
	u32 cqn;

	/* type == MLX5_EVENT_TYPE_CQ_ERROR */

	eqt = mlx5_nb_cof(nb, struct mlx5_eq_table, cq_err_nb);
	eq  = &eqt->async_eq.core;
	eqe = data;

	cqn = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff;
	mlx5_core_warn(eq->dev, "CQ error on CQN 0x%x, syndrome 0x%x\n",
		       cqn, eqe->data.cq_err.syndrome);

	cq = mlx5_eq_cq_get(eq, cqn);
	if (unlikely(!cq)) {
		mlx5_core_warn(eq->dev, "Async event for bogus CQ 0x%x\n", cqn);
		return NOTIFY_OK;
	}

	if (cq->event)
		cq->event(cq, type);

	mlx5_cq_put(cq);

	return NOTIFY_OK;
}

static void gather_user_async_events(struct mlx5_core_dev *dev, u64 mask[4])
{
	__be64 *user_unaffiliated_events;
	__be64 *user_affiliated_events;
	int i;

	user_affiliated_events =
		MLX5_CAP_DEV_EVENT(dev, user_affiliated_events);
	user_unaffiliated_events =
		MLX5_CAP_DEV_EVENT(dev, user_unaffiliated_events);

	for (i = 0; i < 4; i++)
		mask[i] |= be64_to_cpu(user_affiliated_events[i] |
				       user_unaffiliated_events[i]);
}

static void gather_async_events_mask(struct mlx5_core_dev *dev, u64 mask[4])
{
	u64 async_event_mask = MLX5_ASYNC_EVENT_MASK;

	if (MLX5_VPORT_MANAGER(dev))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_NIC_VPORT_CHANGE);

	if (MLX5_CAP_GEN(dev, general_notification_event))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_GENERAL_EVENT);

	if (MLX5_CAP_GEN(dev, port_module_event))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_PORT_MODULE_EVENT);
	else
		mlx5_core_dbg(dev, "port_module_event is not set\n");

	if (MLX5_PPS_CAP(dev))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_PPS_EVENT);

	if (MLX5_CAP_GEN(dev, fpga))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_FPGA_ERROR) |
				    (1ull << MLX5_EVENT_TYPE_FPGA_QP_ERROR);
	if (MLX5_CAP_GEN_MAX(dev, dct))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_DCT_DRAINED);

	if (MLX5_CAP_GEN(dev, temp_warn_event))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_TEMP_WARN_EVENT);

	if (MLX5_CAP_MCAM_REG(dev, tracer_registers))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_DEVICE_TRACER);

	if (MLX5_CAP_GEN(dev, max_num_of_monitor_counters))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_MONITOR_COUNTER);

	if (mlx5_eswitch_is_funcs_handler(dev))
		async_event_mask |=
			(1ull << MLX5_EVENT_TYPE_ESW_FUNCTIONS_CHANGED);

	if (MLX5_CAP_GEN_MAX(dev, vhca_state))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_VHCA_STATE_CHANGE);

	if (MLX5_CAP_MACSEC(dev, log_max_macsec_offload))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_OBJECT_CHANGE);

	if (mlx5_ipsec_device_caps(dev) & MLX5_IPSEC_CAP_PACKET_OFFLOAD)
		async_event_mask |=
			(1ull << MLX5_EVENT_TYPE_OBJECT_CHANGE);

	mask[0] = async_event_mask;

	if (MLX5_CAP_GEN(dev, event_cap))
		gather_user_async_events(dev, mask);
}

static int
setup_async_eq(struct mlx5_core_dev *dev, struct mlx5_eq_async *eq,
	       struct mlx5_eq_param *param, const char *name)
{
	int err;

	eq->irq_nb.notifier_call = mlx5_eq_async_int;
	spin_lock_init(&eq->lock);

	err = create_async_eq(dev, &eq->core, param);
	if (err) {
		mlx5_core_warn(dev, "failed to create %s EQ %d\n", name, err);
		return err;
	}
	err = mlx5_eq_enable(dev, &eq->core, &eq->irq_nb);
	if (err) {
		mlx5_core_warn(dev, "failed to enable %s EQ %d\n", name, err);
		destroy_async_eq(dev, &eq->core);
	}
	return err;
}

static void cleanup_async_eq(struct mlx5_core_dev *dev,
			     struct mlx5_eq_async *eq, const char *name)
{
	int err;

	mlx5_eq_disable(dev, &eq->core, &eq->irq_nb);
	err = destroy_async_eq(dev, &eq->core);
	if (err)
		mlx5_core_err(dev, "failed to destroy %s eq, err(%d)\n",
			      name, err);
}

static u16 async_eq_depth_devlink_param_get(struct mlx5_core_dev *dev)
{
	struct devlink *devlink = priv_to_devlink(dev);
	union devlink_param_value val;
	int err;

	err = devl_param_driverinit_value_get(devlink,
					      DEVLINK_PARAM_GENERIC_ID_EVENT_EQ_SIZE,
					      &val);
	if (!err)
		return val.vu32;
	mlx5_core_dbg(dev, "Failed to get param. using default. err = %d\n", err);
	return MLX5_NUM_ASYNC_EQE;
}
static int create_async_eqs(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = dev->priv.eq_table;
	struct mlx5_eq_param param = {};
	int err;

	/* All the async EQs use a single IRQ: request one IRQ and share its
	 * index among all the async EQs of this device.
	 */
	table->ctrl_irq = mlx5_ctrl_irq_request(dev);
	if (IS_ERR(table->ctrl_irq))
		return PTR_ERR(table->ctrl_irq);

	MLX5_NB_INIT(&table->cq_err_nb, cq_err_event_notifier, CQ_ERROR);
	mlx5_eq_notifier_register(dev, &table->cq_err_nb);

	param = (struct mlx5_eq_param) {
		.irq = table->ctrl_irq,
		.nent = MLX5_NUM_CMD_EQE,
		.mask[0] = 1ull << MLX5_EVENT_TYPE_CMD,
	};
	mlx5_cmd_allowed_opcode(dev, MLX5_CMD_OP_CREATE_EQ);
	err = setup_async_eq(dev, &table->cmd_eq, &param, "cmd");
	if (err)
		goto err1;

	mlx5_cmd_use_events(dev);
	mlx5_cmd_allowed_opcode(dev, CMD_ALLOWED_OPCODE_ALL);

	param = (struct mlx5_eq_param) {
		.irq = table->ctrl_irq,
		.nent = async_eq_depth_devlink_param_get(dev),
	};

	gather_async_events_mask(dev, param.mask);
	err = setup_async_eq(dev, &table->async_eq, &param, "async");
	if (err)
		goto err2;

	param = (struct mlx5_eq_param) {
		.irq = table->ctrl_irq,
		.nent = /* TODO: sriov max_vf + */ 1,
		.mask[0] = 1ull << MLX5_EVENT_TYPE_PAGE_REQUEST,
	};

	err = setup_async_eq(dev, &table->pages_eq, &param, "pages");
	if (err)
		goto err3;

	return 0;

err3:
	cleanup_async_eq(dev, &table->async_eq, "async");
err2:
	mlx5_cmd_use_polling(dev);
	cleanup_async_eq(dev, &table->cmd_eq, "cmd");
err1:
	mlx5_cmd_allowed_opcode(dev, CMD_ALLOWED_OPCODE_ALL);
	mlx5_eq_notifier_unregister(dev, &table->cq_err_nb);
	mlx5_ctrl_irq_release(table->ctrl_irq);
	return err;
}

static void destroy_async_eqs(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = dev->priv.eq_table;

	cleanup_async_eq(dev, &table->pages_eq, "pages");
	cleanup_async_eq(dev, &table->async_eq, "async");
	mlx5_cmd_allowed_opcode(dev, MLX5_CMD_OP_DESTROY_EQ);
	mlx5_cmd_use_polling(dev);
	cleanup_async_eq(dev, &table->cmd_eq, "cmd");
	mlx5_cmd_allowed_opcode(dev, CMD_ALLOWED_OPCODE_ALL);
	mlx5_eq_notifier_unregister(dev, &table->cq_err_nb);
	mlx5_ctrl_irq_release(table->ctrl_irq);
}

struct mlx5_eq *mlx5_get_async_eq(struct mlx5_core_dev *dev)
{
	return &dev->priv.eq_table->async_eq.core;
}

void mlx5_eq_synchronize_async_irq(struct mlx5_core_dev *dev)
{
	synchronize_irq(dev->priv.eq_table->async_eq.core.irqn);
}

void mlx5_eq_synchronize_cmd_irq(struct mlx5_core_dev *dev)
{
	synchronize_irq(dev->priv.eq_table->cmd_eq.core.irqn);
}

/* Generic EQ API for mlx5_core consumers
 * Needed For RDMA ODP EQ for now
 */
struct mlx5_eq *
mlx5_eq_create_generic(struct mlx5_core_dev *dev,
		       struct mlx5_eq_param *param)
{
	struct mlx5_eq *eq = kvzalloc_node(sizeof(*eq), GFP_KERNEL,
					   dev->priv.numa_node);
	int err;

	if (!eq)
		return ERR_PTR(-ENOMEM);

	param->irq = dev->priv.eq_table->ctrl_irq;
	err = create_async_eq(dev, eq, param);
	if (err) {
		kvfree(eq);
		eq = ERR_PTR(err);
	}

	return eq;
}
EXPORT_SYMBOL(mlx5_eq_create_generic);

int mlx5_eq_destroy_generic(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
	int err;

	if (IS_ERR(eq))
		return -EINVAL;

	err = destroy_async_eq(dev, eq);
	if (err)
		goto out;

	kvfree(eq);
out:
	return err;
}
EXPORT_SYMBOL(mlx5_eq_destroy_generic);
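
/* A minimal usage sketch of the generic EQ API above (illustrative only; the
 * handler name, notifier block and event mask are hypothetical, not taken
 * from an existing consumer):
 *
 *	struct mlx5_eq_param param = {
 *		.nent = 64,
 *		.mask[0] = 1ull << MLX5_EVENT_TYPE_PAGE_FAULT,
 *	};
 *	struct notifier_block nb = { .notifier_call = my_eq_handler };
 *	struct mlx5_eq *eq;
 *	int err;
 *
 *	eq = mlx5_eq_create_generic(dev, &param);
 *	if (IS_ERR(eq))
 *		return PTR_ERR(eq);
 *	err = mlx5_eq_enable(dev, eq, &nb);
 *	...
 *	mlx5_eq_disable(dev, eq, &nb);
 *	mlx5_eq_destroy_generic(dev, eq);
 */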

struct mlx5_eqe *mlx5_eq_get_eqe(struct mlx5_eq *eq, u32 cc)
{
	u32 ci = eq->cons_index + cc;
	u32 nent = eq_get_size(eq);
	struct mlx5_eqe *eqe;

	eqe = get_eqe(eq, ci & (nent - 1));
	eqe = ((eqe->owner & 1) ^ !!(ci & nent)) ? NULL : eqe;
	/* Make sure we read EQ entry contents after we've
	 * checked the ownership bit.
	 */
	if (eqe)
		dma_rmb();

	return eqe;
}
EXPORT_SYMBOL(mlx5_eq_get_eqe);

void mlx5_eq_update_ci(struct mlx5_eq *eq, u32 cc, bool arm)
{
	__be32 __iomem *addr = eq->doorbell + (arm ? 0 : 2);
	u32 val;

	eq->cons_index += cc;
	val = (eq->cons_index & 0xffffff) | (eq->eqn << 24);

	__raw_writel((__force u32)cpu_to_be32(val), addr);
	/* We still want ordering, just not swabbing, so add a barrier */
	wmb();
}
EXPORT_SYMBOL(mlx5_eq_update_ci);
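
/* Together, mlx5_eq_get_eqe() and mlx5_eq_update_ci() let a consumer poll a
 * generic EQ from its own context. A sketch of the expected pattern, derived
 * from the two helpers above rather than copied from an existing caller:
 *
 *	struct mlx5_eqe *eqe;
 *	u32 cc = 0;
 *
 *	while ((eqe = mlx5_eq_get_eqe(eq, cc))) {
 *		// process eqe->type / eqe->data
 *		cc++;
 *	}
 *	mlx5_eq_update_ci(eq, cc, true);	// advance ci and re-arm the EQ
 */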

static void comp_irqs_release(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = dev->priv.eq_table;

	if (mlx5_core_is_sf(dev))
		mlx5_irq_affinity_irqs_release(dev, table->comp_irqs, table->num_comp_eqs);
	else
		mlx5_irqs_release_vectors(table->comp_irqs, table->num_comp_eqs);
	kfree(table->comp_irqs);
}

static int comp_irqs_request(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = dev->priv.eq_table;
	const struct cpumask *prev = cpu_none_mask;
	const struct cpumask *mask;
	int ncomp_eqs = table->num_comp_eqs;
	u16 *cpus;
	int ret;
	int cpu;
	int i;

	ncomp_eqs = table->num_comp_eqs;
	table->comp_irqs = kcalloc(ncomp_eqs, sizeof(*table->comp_irqs), GFP_KERNEL);
	if (!table->comp_irqs)
		return -ENOMEM;
	if (mlx5_core_is_sf(dev)) {
		ret = mlx5_irq_affinity_irqs_request_auto(dev, ncomp_eqs, table->comp_irqs);
		if (ret < 0)
			goto free_irqs;
		return ret;
	}

	cpus = kcalloc(ncomp_eqs, sizeof(*cpus), GFP_KERNEL);
	if (!cpus) {
		ret = -ENOMEM;
		goto free_irqs;
	}

	i = 0;
	rcu_read_lock();
	for_each_numa_hop_mask(mask, dev->priv.numa_node) {
		for_each_cpu_andnot(cpu, mask, prev) {
			cpus[i] = cpu;
			if (++i == ncomp_eqs)
				goto spread_done;
		}
		prev = mask;
	}
spread_done:
	rcu_read_unlock();
	ret = mlx5_irqs_request_vectors(dev, cpus, ncomp_eqs, table->comp_irqs);
	kfree(cpus);
	if (ret < 0)
		goto free_irqs;
	return ret;

free_irqs:
	kfree(table->comp_irqs);
	return ret;
}

static void destroy_comp_eqs(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = dev->priv.eq_table;
	struct mlx5_eq_comp *eq, *n;

	list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) {
		list_del(&eq->list);
		mlx5_eq_disable(dev, &eq->core, &eq->irq_nb);
		if (destroy_unmap_eq(dev, &eq->core))
			mlx5_core_warn(dev, "failed to destroy comp EQ 0x%x\n",
				       eq->core.eqn);
		tasklet_disable(&eq->tasklet_ctx.task);
		kfree(eq);
	}
	comp_irqs_release(dev);
}

static u16 comp_eq_depth_devlink_param_get(struct mlx5_core_dev *dev)
{
	struct devlink *devlink = priv_to_devlink(dev);
	union devlink_param_value val;
	int err;

	err = devl_param_driverinit_value_get(devlink,
					      DEVLINK_PARAM_GENERIC_ID_IO_EQ_SIZE,
					      &val);
	if (!err)
		return val.vu32;
	mlx5_core_dbg(dev, "Failed to get param. using default. err = %d\n", err);
	return MLX5_COMP_EQ_SIZE;
}

static int create_comp_eqs(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = dev->priv.eq_table;
	struct mlx5_eq_comp *eq;
	int ncomp_eqs;
	int nent;
	int err;
	int i;

	ncomp_eqs = comp_irqs_request(dev);
	if (ncomp_eqs < 0)
		return ncomp_eqs;
	INIT_LIST_HEAD(&table->comp_eqs_list);
	nent = comp_eq_depth_devlink_param_get(dev);

	for (i = 0; i < ncomp_eqs; i++) {
		struct mlx5_eq_param param = {};

		eq = kzalloc_node(sizeof(*eq), GFP_KERNEL, dev->priv.numa_node);
		if (!eq) {
			err = -ENOMEM;
			goto clean;
		}

		INIT_LIST_HEAD(&eq->tasklet_ctx.list);
		INIT_LIST_HEAD(&eq->tasklet_ctx.process_list);
		spin_lock_init(&eq->tasklet_ctx.lock);
		tasklet_setup(&eq->tasklet_ctx.task, mlx5_cq_tasklet_cb);

		eq->irq_nb.notifier_call = mlx5_eq_comp_int;
		param = (struct mlx5_eq_param) {
			.irq = table->comp_irqs[i],
			.nent = nent,
		};

		err = create_map_eq(dev, &eq->core, &param);
		if (err)
			goto clean_eq;
		err = mlx5_eq_enable(dev, &eq->core, &eq->irq_nb);
		if (err) {
			destroy_unmap_eq(dev, &eq->core);
			goto clean_eq;
		}

		mlx5_core_dbg(dev, "allocated completion EQN %d\n", eq->core.eqn);
		/* add tail, to keep the list ordered, for mlx5_vector2eqn to work */
		list_add_tail(&eq->list, &table->comp_eqs_list);
	}

	table->num_comp_eqs = ncomp_eqs;
	return 0;

clean_eq:
	kfree(eq);
clean:
	destroy_comp_eqs(dev);
	return err;
}

static int vector2eqnirqn(struct mlx5_core_dev *dev, int vector, int *eqn,
			  unsigned int *irqn)
{
	struct mlx5_eq_table *table = dev->priv.eq_table;
	struct mlx5_eq_comp *eq;
	int err = -ENOENT;
	int i = 0;

	list_for_each_entry(eq, &table->comp_eqs_list, list) {
		if (i++ == vector) {
			if (irqn)
				*irqn = eq->core.irqn;
			if (eqn)
				*eqn = eq->core.eqn;
			err = 0;
			break;
		}
	}

	return err;
}

int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn)
{
	return vector2eqnirqn(dev, vector, eqn, NULL);
}
EXPORT_SYMBOL(mlx5_vector2eqn);

int mlx5_vector2irqn(struct mlx5_core_dev *dev, int vector, unsigned int *irqn)
{
	return vector2eqnirqn(dev, vector, NULL, irqn);
}

unsigned int mlx5_comp_vectors_count(struct mlx5_core_dev *dev)
{
	return dev->priv.eq_table->num_comp_eqs;
}
EXPORT_SYMBOL(mlx5_comp_vectors_count);

struct cpumask *
mlx5_comp_irq_get_affinity_mask(struct mlx5_core_dev *dev, int vector)
{
	struct mlx5_eq_table *table = dev->priv.eq_table;
	struct mlx5_eq_comp *eq;
	int i = 0;

	list_for_each_entry(eq, &table->comp_eqs_list, list) {
		if (i++ == vector)
			break;
	}

	return mlx5_irq_get_affinity_mask(eq->core.irq);
}
EXPORT_SYMBOL(mlx5_comp_irq_get_affinity_mask);

#ifdef CONFIG_RFS_ACCEL
struct cpu_rmap *mlx5_eq_table_get_rmap(struct mlx5_core_dev *dev)
{
	return dev->priv.eq_table->rmap;
}
#endif

struct mlx5_eq_comp *mlx5_eqn2comp_eq(struct mlx5_core_dev *dev, int eqn)
{
	struct mlx5_eq_table *table = dev->priv.eq_table;
	struct mlx5_eq_comp *eq;

	list_for_each_entry(eq, &table->comp_eqs_list, list) {
		if (eq->core.eqn == eqn)
			return eq;
	}

	return ERR_PTR(-ENOENT);
}

static void clear_rmap(struct mlx5_core_dev *dev)
{
#ifdef CONFIG_RFS_ACCEL
	struct mlx5_eq_table *eq_table = dev->priv.eq_table;

	free_irq_cpu_rmap(eq_table->rmap);
#endif
}

static int set_rmap(struct mlx5_core_dev *mdev)
{
	int err = 0;
#ifdef CONFIG_RFS_ACCEL
	struct mlx5_eq_table *eq_table = mdev->priv.eq_table;
	int vecidx;

	eq_table->rmap = alloc_irq_cpu_rmap(eq_table->num_comp_eqs);
	if (!eq_table->rmap) {
		err = -ENOMEM;
		mlx5_core_err(mdev, "Failed to allocate cpu_rmap. err %d", err);
		goto err_out;
	}

	for (vecidx = 0; vecidx < eq_table->num_comp_eqs; vecidx++) {
		err = irq_cpu_rmap_add(eq_table->rmap,
				       pci_irq_vector(mdev->pdev, vecidx));
		if (err) {
			mlx5_core_err(mdev, "irq_cpu_rmap_add failed. err %d",
				      err);
			goto err_irq_cpu_rmap_add;
		}
	}
	return 0;

err_irq_cpu_rmap_add:
	clear_rmap(mdev);
err_out:
#endif
	return err;
}

/* This function should only be called after mlx5_cmd_force_teardown_hca */
void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = dev->priv.eq_table;

	mutex_lock(&table->lock); /* sync with create/destroy_async_eq */
	if (!mlx5_core_is_sf(dev))
		clear_rmap(dev);
	mlx5_irq_table_destroy(dev);
	mutex_unlock(&table->lock);
}

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
#define MLX5_MAX_ASYNC_EQS 4
#else
#define MLX5_MAX_ASYNC_EQS 3
#endif
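
/* MLX5_MAX_ASYNC_EQS reserves EQ numbers for the non-completion EQs: the cmd,
 * async and pages EQs created above, plus (when on-demand paging is enabled)
 * one more generic EQ that the RDMA ODP consumer is expected to create via
 * mlx5_eq_create_generic().
 */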

int mlx5_eq_table_create(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *eq_table = dev->priv.eq_table;
	int num_eqs = MLX5_CAP_GEN(dev, max_num_eqs) ?
		      MLX5_CAP_GEN(dev, max_num_eqs) :
		      1 << MLX5_CAP_GEN(dev, log_max_eq);
	int max_eqs_sf;
	int err;

	eq_table->num_comp_eqs =
		min_t(int,
		      mlx5_irq_table_get_num_comp(eq_table->irq_table),
		      num_eqs - MLX5_MAX_ASYNC_EQS);
	if (mlx5_core_is_sf(dev)) {
		max_eqs_sf = min_t(int, MLX5_COMP_EQS_PER_SF,
				   mlx5_irq_table_get_sfs_vec(eq_table->irq_table));
		eq_table->num_comp_eqs = min_t(int, eq_table->num_comp_eqs,
					       max_eqs_sf);
	}

	err = create_async_eqs(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to create async EQs\n");
		goto err_async_eqs;
	}

	if (!mlx5_core_is_sf(dev)) {
		/* rmap is a mapping between IRQ number and queue number.
		 * Each IRQ can be assigned to only a single rmap.
		 * Since SFs share IRQs, rmap mapping cannot function correctly
		 * for IRQs that are shared between different core/netdev RX rings.
		 * Hence we don't allow netdev rmap for SFs.
		 */
		err = set_rmap(dev);
		if (err)
			goto err_rmap;
	}

	err = create_comp_eqs(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to create completion EQs\n");
		goto err_comp_eqs;
	}

	return 0;
err_comp_eqs:
	if (!mlx5_core_is_sf(dev))
		clear_rmap(dev);
err_rmap:
	destroy_async_eqs(dev);
err_async_eqs:
	return err;
}

void mlx5_eq_table_destroy(struct mlx5_core_dev *dev)
{
	if (!mlx5_core_is_sf(dev))
		clear_rmap(dev);
	destroy_comp_eqs(dev);
	destroy_async_eqs(dev);
}

int mlx5_eq_notifier_register(struct mlx5_core_dev *dev, struct mlx5_nb *nb)
{
	struct mlx5_eq_table *eqt = dev->priv.eq_table;

	return atomic_notifier_chain_register(&eqt->nh[nb->event_type], &nb->nb);
}
EXPORT_SYMBOL(mlx5_eq_notifier_register);

int mlx5_eq_notifier_unregister(struct mlx5_core_dev *dev, struct mlx5_nb *nb)
{
	struct mlx5_eq_table *eqt = dev->priv.eq_table;

	return atomic_notifier_chain_unregister(&eqt->nh[nb->event_type], &nb->nb);
}
EXPORT_SYMBOL(mlx5_eq_notifier_unregister);
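
/* A minimal sketch of registering for an async event through the notifier
 * chains above, mirroring how cq_err_nb is wired up in this file
 * (illustrative; my_port_event is a hypothetical handler):
 *
 *	static int my_port_event(struct notifier_block *nb,
 *				 unsigned long type, void *data)
 *	{
 *		struct mlx5_eqe *eqe = data;
 *
 *		// type == MLX5_EVENT_TYPE_PORT_CHANGE here
 *		return NOTIFY_OK;
 *	}
 *
 *	struct mlx5_nb port_nb;
 *
 *	MLX5_NB_INIT(&port_nb, my_port_event, PORT_CHANGE);
 *	mlx5_eq_notifier_register(dev, &port_nb);
 *	...
 *	mlx5_eq_notifier_unregister(dev, &port_nb);
 */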