// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2019 Mellanox Technologies. */

#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/vport.h>
#include "mlx5_core.h"
#include "mlx5_irq.h"
#include "pci_irq.h"
#include "lib/sf.h"
#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#endif

#define MLX5_SFS_PER_CTRL_IRQ 64
#define MLX5_IRQ_CTRL_SF_MAX 8
/* min num of vectors for SFs to be enabled */
#define MLX5_IRQ_VEC_COMP_BASE_SF 2

#define MLX5_EQ_SHARE_IRQ_MAX_COMP (8)
#define MLX5_EQ_SHARE_IRQ_MAX_CTRL (UINT_MAX)
#define MLX5_EQ_SHARE_IRQ_MIN_COMP (1)
#define MLX5_EQ_SHARE_IRQ_MIN_CTRL (4)

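/* State for a single allocated MSI-X vector: @nh is the notifier chain called
 * from the hard-IRQ handler, @refcount tracks how many users (EQs) share the
 * vector, and @index is the vector's slot in its owning pool.
 */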
struct mlx5_irq {
	struct atomic_notifier_head nh;
	cpumask_var_t mask;
	char name[MLX5_MAX_IRQ_NAME];
	struct mlx5_irq_pool *pool;
	int refcount;
	u32 index;
	int irqn;
};

struct mlx5_irq_table {
	struct mlx5_irq_pool *pf_pool;
	struct mlx5_irq_pool *sf_ctrl_pool;
	struct mlx5_irq_pool *sf_comp_pool;
};

/**
 * mlx5_get_default_msix_vec_count - Get the default number of MSI-X vectors
 *                                   to be assigned to each VF.
 * @dev: PF to work on
 * @num_vfs: Number of enabled VFs
 */
int mlx5_get_default_msix_vec_count(struct mlx5_core_dev *dev, int num_vfs)
{
	int num_vf_msix, min_msix, max_msix;

	num_vf_msix = MLX5_CAP_GEN_MAX(dev, num_total_dynamic_vf_msix);
	if (!num_vf_msix)
		return 0;

	min_msix = MLX5_CAP_GEN(dev, min_dynamic_vf_msix_table_size);
	max_msix = MLX5_CAP_GEN(dev, max_dynamic_vf_msix_table_size);

	/* Limit maximum number of MSI-X vectors so the default configuration
	 * has some available in the pool. This will allow the user to increase
	 * the number of vectors in a VF without having to first size-down other
	 * VFs.
	 */
	return max(min(num_vf_msix / num_vfs, max_msix / 2), min_msix);
}

/**
 * mlx5_set_msix_vec_count - Set dynamically allocated MSI-X on the VF
 * @dev: PF to work on
 * @function_id: Internal PCI VF function ID
 * @msix_vec_count: Number of MSI-X vectors to set
 */
int mlx5_set_msix_vec_count(struct mlx5_core_dev *dev, int function_id,
			    int msix_vec_count)
{
	int query_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
	int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
	void *hca_cap = NULL, *query_cap = NULL, *cap;
	int num_vf_msix, min_msix, max_msix;
	int ret;

	num_vf_msix = MLX5_CAP_GEN_MAX(dev, num_total_dynamic_vf_msix);
	if (!num_vf_msix)
		return 0;

	if (!MLX5_CAP_GEN(dev, vport_group_manager) || !mlx5_core_is_pf(dev))
		return -EOPNOTSUPP;

	min_msix = MLX5_CAP_GEN(dev, min_dynamic_vf_msix_table_size);
	max_msix = MLX5_CAP_GEN(dev, max_dynamic_vf_msix_table_size);

	if (msix_vec_count < min_msix)
		return -EINVAL;

	if (msix_vec_count > max_msix)
		return -EOVERFLOW;

	query_cap = kvzalloc(query_sz, GFP_KERNEL);
	hca_cap = kvzalloc(set_sz, GFP_KERNEL);
	if (!hca_cap || !query_cap) {
		ret = -ENOMEM;
		goto out;
	}

	ret = mlx5_vport_get_other_func_general_cap(dev, function_id, query_cap);
	if (ret)
		goto out;

	cap = MLX5_ADDR_OF(set_hca_cap_in, hca_cap, capability);
	memcpy(cap, MLX5_ADDR_OF(query_hca_cap_out, query_cap, capability),
	       MLX5_UN_SZ_BYTES(hca_cap_union));
	MLX5_SET(cmd_hca_cap, cap, dynamic_msix_table_size, msix_vec_count);

	MLX5_SET(set_hca_cap_in, hca_cap, opcode, MLX5_CMD_OP_SET_HCA_CAP);
	MLX5_SET(set_hca_cap_in, hca_cap, other_function, 1);
	MLX5_SET(set_hca_cap_in, hca_cap, function_id, function_id);

	MLX5_SET(set_hca_cap_in, hca_cap, op_mod,
		 MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE << 1);
	ret = mlx5_cmd_exec_in(dev, set_hca_cap, hca_cap);
out:
	kvfree(hca_cap);
	kvfree(query_cap);
	return ret;
}

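/* Release an IRQ back to the system: remove it from the pool's xarray, clear
 * its affinity hint and free the vector. Called once the last reference is
 * dropped, or on pool teardown.
 */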
static void irq_release(struct mlx5_irq *irq)
{
	struct mlx5_irq_pool *pool = irq->pool;

	xa_erase(&pool->irqs, irq->index);
	/* free_irq() requires that affinity_hint and rmap are cleared before
	 * calling it. This is why there is asymmetry with set_rmap, which
	 * should be called after alloc_irq but before request_irq.
	 */
	irq_update_affinity_hint(irq->irqn, NULL);
	free_cpumask_var(irq->mask);
	free_irq(irq->irqn, &irq->nh);
	kfree(irq);
}

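/* Drop a reference on @irq. Returns 1 if this was the last reference and the
 * IRQ was released, 0 otherwise.
 */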
int mlx5_irq_put(struct mlx5_irq *irq)
{
	struct mlx5_irq_pool *pool = irq->pool;
	int ret = 0;

	mutex_lock(&pool->lock);
	irq->refcount--;
	if (!irq->refcount) {
		irq_release(irq);
		ret = 1;
	}
	mutex_unlock(&pool->lock);
	return ret;
}

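/* Refcount helpers for callers that already hold the pool lock:
 * mlx5_irq_read_locked() returns the current reference count and
 * mlx5_irq_get_locked() takes an extra reference, failing (returning 0)
 * if the IRQ is not referenced at all.
 */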
int mlx5_irq_read_locked(struct mlx5_irq *irq)
{
	lockdep_assert_held(&irq->pool->lock);
	return irq->refcount;
}

int mlx5_irq_get_locked(struct mlx5_irq *irq)
{
	lockdep_assert_held(&irq->pool->lock);
	if (WARN_ON_ONCE(!irq->refcount))
		return 0;
	irq->refcount++;
	return 1;
}

static int irq_get(struct mlx5_irq *irq)
{
	int err;

	mutex_lock(&irq->pool->lock);
	err = mlx5_irq_get_locked(irq);
	mutex_unlock(&irq->pool->lock);
	return err;
}

static irqreturn_t irq_int_handler(int irq, void *nh)
{
	atomic_notifier_call_chain(nh, 0, NULL);
	return IRQ_HANDLED;
}

static void irq_sf_set_name(struct mlx5_irq_pool *pool, char *name, int vecidx)
{
	snprintf(name, MLX5_MAX_IRQ_NAME, "%s%d", pool->name, vecidx);
}

static void irq_set_name(struct mlx5_irq_pool *pool, char *name, int vecidx)
{
	if (!pool->xa_num_irqs.max) {
		/* in case we only have a single irq for the device */
		snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_combined%d", vecidx);
		return;
	}

	if (vecidx == pool->xa_num_irqs.max) {
		snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_async%d", vecidx);
		return;
	}

	snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d", vecidx);
}

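/* Allocate vector @i from the pool: request the Linux IRQ, optionally apply
 * the requested @affinity, and store the new mlx5_irq in the pool's xarray.
 * The IRQ starts with a single reference held by the caller.
 */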
struct mlx5_irq *mlx5_irq_alloc(struct mlx5_irq_pool *pool, int i,
				const struct cpumask *affinity)
{
	struct mlx5_core_dev *dev = pool->dev;
	char name[MLX5_MAX_IRQ_NAME];
	struct mlx5_irq *irq;
	int err;

	irq = kzalloc(sizeof(*irq), GFP_KERNEL);
	if (!irq)
		return ERR_PTR(-ENOMEM);
	irq->irqn = pci_irq_vector(dev->pdev, i);
	if (!mlx5_irq_pool_is_sf_pool(pool))
		irq_set_name(pool, name, i);
	else
		irq_sf_set_name(pool, name, i);
	ATOMIC_INIT_NOTIFIER_HEAD(&irq->nh);
	snprintf(irq->name, MLX5_MAX_IRQ_NAME,
		 "%s@pci:%s", name, pci_name(dev->pdev));
	err = request_irq(irq->irqn, irq_int_handler, 0, irq->name,
			  &irq->nh);
	if (err) {
		mlx5_core_err(dev, "Failed to request irq. err = %d\n", err);
		goto err_req_irq;
	}
	if (!zalloc_cpumask_var(&irq->mask, GFP_KERNEL)) {
		mlx5_core_warn(dev, "zalloc_cpumask_var failed\n");
		err = -ENOMEM;
		goto err_cpumask;
	}
	if (affinity) {
		cpumask_copy(irq->mask, affinity);
		irq_set_affinity_and_hint(irq->irqn, irq->mask);
	}
	irq->pool = pool;
	irq->refcount = 1;
	irq->index = i;
	err = xa_err(xa_store(&pool->irqs, irq->index, irq, GFP_KERNEL));
	if (err) {
		mlx5_core_err(dev, "Failed to alloc xa entry for irq(%u). err = %d\n",
			      irq->index, err);
		goto err_xa;
	}
	return irq;
err_xa:
	irq_update_affinity_hint(irq->irqn, NULL);
	free_cpumask_var(irq->mask);
err_cpumask:
	free_irq(irq->irqn, &irq->nh);
err_req_irq:
	kfree(irq);
	return ERR_PTR(err);
}

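/* EQs attach to an IRQ by registering a notifier block on its notifier chain.
 * Attaching takes a reference on the IRQ; detaching unregisters the notifier
 * and drops that reference.
 */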
int mlx5_irq_attach_nb(struct mlx5_irq *irq, struct notifier_block *nb)
{
	int ret;

	ret = irq_get(irq);
	if (!ret)
		/* Something went very wrong here: we are enabling an EQ
		 * on a non-existent IRQ.
		 */
		return -ENOENT;
	ret = atomic_notifier_chain_register(&irq->nh, nb);
	if (ret)
		mlx5_irq_put(irq);
	return ret;
}

int mlx5_irq_detach_nb(struct mlx5_irq *irq, struct notifier_block *nb)
{
	int err = 0;

	err = atomic_notifier_chain_unregister(&irq->nh, nb);
	mlx5_irq_put(irq);
	return err;
}

struct cpumask *mlx5_irq_get_affinity_mask(struct mlx5_irq *irq)
{
	return irq->mask;
}

int mlx5_irq_get_index(struct mlx5_irq *irq)
{
	return irq->index;
}

/* irq_pool API */

/* request an irq from a given pool according to the given index */
static struct mlx5_irq *
irq_pool_request_vector(struct mlx5_irq_pool *pool, int vecidx,
			struct cpumask *affinity)
{
	struct mlx5_irq *irq;

	mutex_lock(&pool->lock);
	irq = xa_load(&pool->irqs, vecidx);
	if (irq) {
		mlx5_irq_get_locked(irq);
		goto unlock;
	}
	irq = mlx5_irq_alloc(pool, vecidx, affinity);
unlock:
	mutex_unlock(&pool->lock);
	return irq;
}

static struct mlx5_irq_pool *sf_ctrl_irq_pool_get(struct mlx5_irq_table *irq_table)
{
	return irq_table->sf_ctrl_pool;
}

static struct mlx5_irq_pool *sf_irq_pool_get(struct mlx5_irq_table *irq_table)
{
	return irq_table->sf_comp_pool;
}

struct mlx5_irq_pool *mlx5_irq_pool_get(struct mlx5_core_dev *dev)
{
	struct mlx5_irq_table *irq_table = mlx5_irq_table_get(dev);
	struct mlx5_irq_pool *pool = NULL;

	if (mlx5_core_is_sf(dev))
		pool = sf_irq_pool_get(irq_table);

	/* In some configs, there won't be a pool of SF IRQs. Hence, return
	 * the PF IRQ pool in case the SF pool doesn't exist.
	 */
	return pool ? pool : irq_table->pf_pool;
}

static struct mlx5_irq_pool *ctrl_irq_pool_get(struct mlx5_core_dev *dev)
{
	struct mlx5_irq_table *irq_table = mlx5_irq_table_get(dev);
	struct mlx5_irq_pool *pool = NULL;

	if (mlx5_core_is_sf(dev))
		pool = sf_ctrl_irq_pool_get(irq_table);

	/* In some configs, there won't be a pool of SF IRQs. Hence, return
	 * the PF IRQ pool in case the SF pool doesn't exist.
	 */
	return pool ? pool : irq_table->pf_pool;
}

/**
 * mlx5_irqs_release - release one or more IRQs back to the system.
 * @irqs: IRQs to be released.
 * @nirqs: number of IRQs to be released.
 */
static void mlx5_irqs_release(struct mlx5_irq **irqs, int nirqs)
{
	int i;

	for (i = 0; i < nirqs; i++) {
		synchronize_irq(irqs[i]->irqn);
		mlx5_irq_put(irqs[i]);
	}
}

/**
 * mlx5_ctrl_irq_release - release a ctrl IRQ back to the system.
 * @ctrl_irq: ctrl IRQ to be released.
 */
void mlx5_ctrl_irq_release(struct mlx5_irq *ctrl_irq)
{
	mlx5_irqs_release(&ctrl_irq, 1);
}

/**
 * mlx5_ctrl_irq_request - request a ctrl IRQ for an mlx5 device.
 * @dev: mlx5 device that is requesting the IRQ.
 *
 * This function returns a pointer to the IRQ, or an ERR_PTR in case of error.
 */
struct mlx5_irq *mlx5_ctrl_irq_request(struct mlx5_core_dev *dev)
{
	struct mlx5_irq_pool *pool = ctrl_irq_pool_get(dev);
	cpumask_var_t req_mask;
	struct mlx5_irq *irq;

	if (!zalloc_cpumask_var(&req_mask, GFP_KERNEL))
		return ERR_PTR(-ENOMEM);
	cpumask_copy(req_mask, cpu_online_mask);
	if (!mlx5_irq_pool_is_sf_pool(pool)) {
		/* In case we are allocating a control IRQ for PF/VF */
		if (!pool->xa_num_irqs.max) {
			cpumask_clear(req_mask);
			/* In case we only have a single IRQ for PF/VF */
			cpumask_set_cpu(cpumask_first(cpu_online_mask), req_mask);
		}
		/* Allocate the IRQ in the last index of the pool */
		irq = irq_pool_request_vector(pool, pool->xa_num_irqs.max, req_mask);
	} else {
		irq = mlx5_irq_affinity_request(pool, req_mask);
	}

	free_cpumask_var(req_mask);
	return irq;
}

/**
 * mlx5_irq_request - request an IRQ for an mlx5 PF/VF device.
 * @dev: mlx5 device that is requesting the IRQ.
 * @vecidx: vector index of the IRQ. This argument is ignored if affinity is
 * provided.
 * @affinity: cpumask requested for this IRQ.
 *
 * This function returns a pointer to the IRQ, or an ERR_PTR in case of error.
 */
struct mlx5_irq *mlx5_irq_request(struct mlx5_core_dev *dev, u16 vecidx,
				  struct cpumask *affinity)
{
	struct mlx5_irq_table *irq_table = mlx5_irq_table_get(dev);
	struct mlx5_irq_pool *pool;
	struct mlx5_irq *irq;

	pool = irq_table->pf_pool;
	irq = irq_pool_request_vector(pool, vecidx, affinity);
	if (IS_ERR(irq))
		return irq;
	mlx5_core_dbg(dev, "irq %u mapped to cpu %*pbl, %u EQs on this irq\n",
		      irq->irqn, cpumask_pr_args(affinity),
		      irq->refcount / MLX5_EQ_REFS_PER_IRQ);
	return irq;
}

/**
 * mlx5_irqs_release_vectors - release one or more IRQs back to the system.
 * @irqs: IRQs to be released.
 * @nirqs: number of IRQs to be released.
 */
void mlx5_irqs_release_vectors(struct mlx5_irq **irqs, int nirqs)
{
	mlx5_irqs_release(irqs, nirqs);
}

/**
 * mlx5_irqs_request_vectors - request one or more IRQs for an mlx5 device.
 * @dev: mlx5 device that is requesting the IRQs.
 * @cpus: CPUs array for binding the IRQs
 * @nirqs: number of IRQs to request.
 * @irqs: an output array of IRQ pointers.
 *
 * Each IRQ is bound to at most 1 CPU.
 * This function requests @nirqs IRQs, starting from vector index 0.
 *
 * This function returns the number of IRQs requested (which might be smaller
 * than @nirqs) on success, or a negative error code in case of an error.
 */
int mlx5_irqs_request_vectors(struct mlx5_core_dev *dev, u16 *cpus, int nirqs,
			      struct mlx5_irq **irqs)
{
	cpumask_var_t req_mask;
	struct mlx5_irq *irq;
	int i;

	if (!zalloc_cpumask_var(&req_mask, GFP_KERNEL))
		return -ENOMEM;
	for (i = 0; i < nirqs; i++) {
		cpumask_set_cpu(cpus[i], req_mask);
		irq = mlx5_irq_request(dev, i, req_mask);
		if (IS_ERR(irq))
			break;
		cpumask_clear(req_mask);
		irqs[i] = irq;
	}

	free_cpumask_var(req_mask);
	return i ? i : PTR_ERR(irq);
}

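/* Allocate an IRQ pool covering PCI vectors [start, start + size - 1]. The
 * min/max thresholds (scaled by MLX5_EQ_REFS_PER_IRQ) are used when deciding
 * how many users may share an IRQ of this pool.
 */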
static struct mlx5_irq_pool *
irq_pool_alloc(struct mlx5_core_dev *dev, int start, int size, char *name,
	       u32 min_threshold, u32 max_threshold)
{
	struct mlx5_irq_pool *pool = kvzalloc(sizeof(*pool), GFP_KERNEL);

	if (!pool)
		return ERR_PTR(-ENOMEM);
	pool->dev = dev;
	mutex_init(&pool->lock);
	xa_init_flags(&pool->irqs, XA_FLAGS_ALLOC);
	pool->xa_num_irqs.min = start;
	pool->xa_num_irqs.max = start + size - 1;
	if (name)
		snprintf(pool->name, MLX5_MAX_IRQ_NAME - MLX5_MAX_IRQ_IDX_CHARS,
			 "%s", name);
	pool->min_threshold = min_threshold * MLX5_EQ_REFS_PER_IRQ;
	pool->max_threshold = max_threshold * MLX5_EQ_REFS_PER_IRQ;
	mlx5_core_dbg(dev, "pool->name = %s, pool->size = %d, pool->start = %d",
		      name, size, start);
	return pool;
}

static void irq_pool_free(struct mlx5_irq_pool *pool)
{
	struct mlx5_irq *irq;
	unsigned long index;

	/* There are cases in which we are destroying the irq_table before
	 * freeing all the IRQs, fast teardown for example. Hence, free the
	 * IRQs which might not have been freed.
	 */
	xa_for_each(&pool->irqs, index, irq)
		irq_release(irq);
	xa_destroy(&pool->irqs);
	mutex_destroy(&pool->lock);
	kfree(pool->irqs_per_cpu);
	kvfree(pool);
}

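/* Carve the MSI-X vector space into pools: the first pf_vec vectors go to the
 * PF pool; if SFs are supported and enough vectors remain, the next vectors
 * form the SF control pool and the rest form the SF completion pool.
 */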
static int irq_pools_init(struct mlx5_core_dev *dev, int sf_vec, int pf_vec)
{
	struct mlx5_irq_table *table = dev->priv.irq_table;
	int num_sf_ctrl_by_msix;
	int num_sf_ctrl_by_sfs;
	int num_sf_ctrl;
	int err;

	/* init pf_pool */
	table->pf_pool = irq_pool_alloc(dev, 0, pf_vec, NULL,
					MLX5_EQ_SHARE_IRQ_MIN_COMP,
					MLX5_EQ_SHARE_IRQ_MAX_COMP);
	if (IS_ERR(table->pf_pool))
		return PTR_ERR(table->pf_pool);
	if (!mlx5_sf_max_functions(dev))
		return 0;
	if (sf_vec < MLX5_IRQ_VEC_COMP_BASE_SF) {
		mlx5_core_dbg(dev, "Not enough IRQs for SFs. SFs may run at lower performance\n");
		return 0;
	}

	/* init sf_ctrl_pool */
	num_sf_ctrl_by_msix = DIV_ROUND_UP(sf_vec, MLX5_COMP_EQS_PER_SF);
	num_sf_ctrl_by_sfs = DIV_ROUND_UP(mlx5_sf_max_functions(dev),
					  MLX5_SFS_PER_CTRL_IRQ);
	num_sf_ctrl = min_t(int, num_sf_ctrl_by_msix, num_sf_ctrl_by_sfs);
	num_sf_ctrl = min_t(int, MLX5_IRQ_CTRL_SF_MAX, num_sf_ctrl);
	table->sf_ctrl_pool = irq_pool_alloc(dev, pf_vec, num_sf_ctrl,
					     "mlx5_sf_ctrl",
					     MLX5_EQ_SHARE_IRQ_MIN_CTRL,
					     MLX5_EQ_SHARE_IRQ_MAX_CTRL);
	if (IS_ERR(table->sf_ctrl_pool)) {
		err = PTR_ERR(table->sf_ctrl_pool);
		goto err_pf;
	}
	/* init sf_comp_pool */
	table->sf_comp_pool = irq_pool_alloc(dev, pf_vec + num_sf_ctrl,
					     sf_vec - num_sf_ctrl, "mlx5_sf_comp",
					     MLX5_EQ_SHARE_IRQ_MIN_COMP,
					     MLX5_EQ_SHARE_IRQ_MAX_COMP);
	if (IS_ERR(table->sf_comp_pool)) {
		err = PTR_ERR(table->sf_comp_pool);
		goto err_sf_ctrl;
	}

	table->sf_comp_pool->irqs_per_cpu = kcalloc(nr_cpu_ids, sizeof(u16), GFP_KERNEL);
	if (!table->sf_comp_pool->irqs_per_cpu) {
		err = -ENOMEM;
		goto err_irqs_per_cpu;
	}

	return 0;

err_irqs_per_cpu:
	irq_pool_free(table->sf_comp_pool);
err_sf_ctrl:
	irq_pool_free(table->sf_ctrl_pool);
err_pf:
	irq_pool_free(table->pf_pool);
	return err;
}

static void irq_pools_destroy(struct mlx5_irq_table *table)
{
	if (table->sf_ctrl_pool) {
		irq_pool_free(table->sf_comp_pool);
		irq_pool_free(table->sf_ctrl_pool);
	}
	irq_pool_free(table->pf_pool);
}

/* irq_table API */

int mlx5_irq_table_init(struct mlx5_core_dev *dev)
{
	struct mlx5_irq_table *irq_table;

	if (mlx5_core_is_sf(dev))
		return 0;

	irq_table = kvzalloc_node(sizeof(*irq_table), GFP_KERNEL,
				  dev->priv.numa_node);
	if (!irq_table)
		return -ENOMEM;

	dev->priv.irq_table = irq_table;
	return 0;
}

void mlx5_irq_table_cleanup(struct mlx5_core_dev *dev)
{
	if (mlx5_core_is_sf(dev))
		return;

	kvfree(dev->priv.irq_table);
}

int mlx5_irq_table_get_num_comp(struct mlx5_irq_table *table)
{
	if (!table->pf_pool->xa_num_irqs.max)
		return 1;
	return table->pf_pool->xa_num_irqs.max - table->pf_pool->xa_num_irqs.min;
}

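/* Size the MSI-X vector budget: for the PF, roughly one completion vector per
 * online CPU per port plus one for async events, capped by the number of EQs
 * the device supports, with extra vectors added for SFs when they are
 * enabled. Then allocate the vectors from PCI and carve them into pools.
 */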
int mlx5_irq_table_create(struct mlx5_core_dev *dev)
{
	int num_eqs = MLX5_CAP_GEN(dev, max_num_eqs) ?
		      MLX5_CAP_GEN(dev, max_num_eqs) :
		      1 << MLX5_CAP_GEN(dev, log_max_eq);
	int total_vec;
	int pf_vec;
	int err;

	if (mlx5_core_is_sf(dev))
		return 0;

	pf_vec = MLX5_CAP_GEN(dev, num_ports) * num_online_cpus() + 1;
	pf_vec = min_t(int, pf_vec, num_eqs);

	total_vec = pf_vec;
	if (mlx5_sf_max_functions(dev))
		total_vec += MLX5_IRQ_CTRL_SF_MAX +
			MLX5_COMP_EQS_PER_SF * mlx5_sf_max_functions(dev);

	total_vec = pci_alloc_irq_vectors(dev->pdev, 1, total_vec, PCI_IRQ_MSIX);
	if (total_vec < 0)
		return total_vec;
	pf_vec = min(pf_vec, total_vec);

	err = irq_pools_init(dev, total_vec - pf_vec, pf_vec);
	if (err)
		pci_free_irq_vectors(dev->pdev);

	return err;
}

void mlx5_irq_table_destroy(struct mlx5_core_dev *dev)
{
	struct mlx5_irq_table *table = dev->priv.irq_table;

	if (mlx5_core_is_sf(dev))
		return;

	/* There are cases where IRQs are still in use when we reach this
	 * point. Hence, make sure all the IRQs are released.
	 */
	irq_pools_destroy(table);
	pci_free_irq_vectors(dev->pdev);
}

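/* Number of completion vectors available for SFs: the size of the SF
 * completion pool capped by the number of online CPUs, falling back to the
 * PF completion vectors when no SF pool exists.
 */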
int mlx5_irq_table_get_sfs_vec(struct mlx5_irq_table *table)
{
	if (table->sf_comp_pool)
		return min_t(int, num_online_cpus(),
			     table->sf_comp_pool->xa_num_irqs.max -
			     table->sf_comp_pool->xa_num_irqs.min + 1);
	else
		return mlx5_irq_table_get_num_comp(table);
}

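/* SFs do not own an IRQ table of their own; they use their parent PF's table. */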
struct mlx5_irq_table *mlx5_irq_table_get(struct mlx5_core_dev *dev)
{
#ifdef CONFIG_MLX5_SF
	if (mlx5_core_is_sf(dev))
		return dev->priv.parent_mdev->priv.irq_table;
#endif
	return dev->priv.irq_table;
}