/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2022 Microsoft Corporation. All rights reserved.
 */

#ifndef _MANA_IB_H_
#define _MANA_IB_H_

#include <rdma/ib_verbs.h>
#include <rdma/ib_mad.h>
#include <rdma/ib_umem.h>
#include <rdma/mana-abi.h>
#include <rdma/uverbs_ioctl.h>
#include <linux/dmapool.h>

#include <net/mana/mana.h>
#include "shadow_queue.h"
#include "counters.h"

#define PAGE_SZ_BM \
	(SZ_4K | SZ_8K | SZ_16K | SZ_32K | SZ_64K | SZ_128K | SZ_256K | \
	 SZ_512K | SZ_1M | SZ_2M)
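
/*
 * A sketch of how this bitmap is typically consumed (the variable names
 * are illustrative): before building a DMA region, pick the largest
 * hardware-supported page size that fits the pinned memory layout:
 *
 *	page_sz = ib_umem_find_best_pgsz(umem, PAGE_SZ_BM, virt);
 */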

/* MANA doesn't have any limit for MR size */
#define MANA_IB_MAX_MR_SIZE	U64_MAX

/* Send queue ID mask */
#define MANA_SENDQ_MASK	BIT(31)

/*
 * The hardware limit on the number of MRs is greater than the maximum
 * number of MRs that can be represented in 24 bits.
 */
#define MANA_IB_MAX_MR		0xFFFFFFu

/*
 * The CA timeout is approx. 260ms (4us * 2^MANA_CA_ACK_DELAY = 262144us)
 */
#define MANA_CA_ACK_DELAY	16

/*
 * The size of the buffer used for writing an AV
 */
#define MANA_AV_BUFFER_SIZE	64

struct mana_ib_adapter_caps {
	u32 max_sq_id;
	u32 max_rq_id;
	u32 max_cq_id;
	u32 max_qp_count;
	u32 max_cq_count;
	u32 max_mr_count;
	u32 max_pd_count;
	u32 max_inbound_read_limit;
	u32 max_outbound_read_limit;
	u32 mw_count;
	u32 max_srq_count;
	u32 max_qp_wr;
	u32 max_send_sge_count;
	u32 max_recv_sge_count;
	u32 max_inline_data_size;
	u64 feature_flags;
	u64 page_size_cap;
};
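
/*
 * A queue is backed either by user memory (umem, mapped through a GDMA
 * region) or by kernel memory (kmem); only one of the two is in use for
 * any given queue (compare mana_ib_create_queue() with
 * mana_ib_create_kernel_queue() below).
 */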

struct mana_ib_queue {
	struct ib_umem *umem;
	struct gdma_queue *kmem;
	u64 gdma_region;
	u64 id;
};

struct mana_ib_dev {
	struct ib_device ib_dev;
	struct gdma_dev *gdma_dev;
	mana_handle_t adapter_handle;
	struct gdma_queue *fatal_err_eq;
	struct gdma_queue **eqs;
	struct xarray qp_table_wq;
	struct mana_ib_adapter_caps adapter_caps;
	struct dma_pool *av_pool;
	netdevice_tracker dev_tracker;
	struct notifier_block nb;
};

struct mana_ib_wq {
	struct ib_wq ibwq;
	struct mana_ib_queue queue;
	int wqe;
	u32 wq_buf_size;
	mana_handle_t rx_object;
};

struct mana_ib_pd {
	struct ib_pd ibpd;
	u32 pdn;
	mana_handle_t pd_handle;

	/* Mutex for sharing access to vport_use_count */
	struct mutex vport_mutex;
	int vport_use_count;

	bool tx_shortform_allowed;
	u32 tx_vp_offset;
};

struct mana_ib_av {
	u8 dest_ip[16];
	u8 dest_mac[ETH_ALEN];
	u16 udp_src_port;
	u8 src_ip[16];
	u32 hop_limit : 8;
	u32 reserved1 : 12;
	u32 dscp : 6;
	u32 reserved2 : 5;
	u32 is_ipv6 : 1;
	u32 reserved3 : 32;
};
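
/*
 * The AV referenced by an AH is carved out of mdev->av_pool (a dma_pool
 * of MANA_AV_BUFFER_SIZE blocks), so the device can fetch it directly
 * through dma_handle.
 */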

struct mana_ib_ah {
	struct ib_ah ibah;
	struct mana_ib_av *av;
	dma_addr_t dma_handle;
};

struct mana_ib_mr {
	struct ib_mr ibmr;
	struct ib_umem *umem;
	mana_handle_t mr_handle;
};

struct mana_ib_cq {
	struct ib_cq ibcq;
	struct mana_ib_queue queue;
	/* protects CQ polling */
	spinlock_t cq_lock;
	struct list_head list_send_qp;
	struct list_head list_recv_qp;
	int cqe;
	u32 comp_vector;
	mana_handle_t cq_handle;
};

enum mana_rc_queue_type {
	MANA_RC_SEND_QUEUE_REQUESTER = 0,
	MANA_RC_SEND_QUEUE_RESPONDER,
	MANA_RC_SEND_QUEUE_FMR,
	MANA_RC_RECV_QUEUE_REQUESTER,
	MANA_RC_RECV_QUEUE_RESPONDER,
	MANA_RC_QUEUE_TYPE_MAX,
};

struct mana_ib_rc_qp {
	struct mana_ib_queue queues[MANA_RC_QUEUE_TYPE_MAX];
};
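
/*
 * An RC QP is built from one hardware queue per mana_rc_queue_type; the
 * FMR send queue can be skipped by passing MANA_RC_FLAG_NO_FMR at
 * creation time (see enum mana_rnic_create_rc_flags below).
 */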

enum mana_ud_queue_type {
	MANA_UD_SEND_QUEUE = 0,
	MANA_UD_RECV_QUEUE,
	MANA_UD_QUEUE_TYPE_MAX,
};

struct mana_ib_ud_qp {
	struct mana_ib_queue queues[MANA_UD_QUEUE_TYPE_MAX];
	u32 sq_psn;
};

struct mana_ib_qp {
	struct ib_qp ibqp;

	mana_handle_t qp_handle;
	union {
		struct mana_ib_queue raw_sq;
		struct mana_ib_rc_qp rc_qp;
		struct mana_ib_ud_qp ud_qp;
	};

	/* The port on the IB device, starting with 1 */
	u32 port;

	struct list_head cq_send_list;
	struct list_head cq_recv_list;
	struct shadow_queue shadow_rq;
	struct shadow_queue shadow_sq;

	refcount_t refcount;
	struct completion free;
};
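
/*
 * shadow_sq/shadow_rq mirror the work requests posted to the hardware
 * queues so that completion handling can recover wr_id and per-WR state
 * when a CQE arrives (see shadow_queue.h).
 */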

struct mana_ib_ucontext {
	struct ib_ucontext ibucontext;
	u32 doorbell;
};

struct mana_ib_rwq_ind_table {
	struct ib_rwq_ind_table ib_ind_table;
};

enum mana_ib_command_code {
	MANA_IB_GET_ADAPTER_CAP = 0x30001,
	MANA_IB_CREATE_ADAPTER = 0x30002,
	MANA_IB_DESTROY_ADAPTER = 0x30003,
	MANA_IB_CONFIG_IP_ADDR = 0x30004,
	MANA_IB_CONFIG_MAC_ADDR = 0x30005,
	MANA_IB_CREATE_UD_QP = 0x30006,
	MANA_IB_DESTROY_UD_QP = 0x30007,
	MANA_IB_CREATE_CQ = 0x30008,
	MANA_IB_DESTROY_CQ = 0x30009,
	MANA_IB_CREATE_RC_QP = 0x3000a,
	MANA_IB_DESTROY_RC_QP = 0x3000b,
	MANA_IB_SET_QP_STATE = 0x3000d,
	MANA_IB_QUERY_VF_COUNTERS = 0x30022,
	MANA_IB_QUERY_DEVICE_COUNTERS = 0x30023,
};

struct mana_ib_query_adapter_caps_req {
	struct gdma_req_hdr hdr;
}; /* HW Data */

enum mana_ib_adapter_features {
	MANA_IB_FEATURE_CLIENT_ERROR_CQE_SUPPORT = BIT(4),
	MANA_IB_FEATURE_DEV_COUNTERS_SUPPORT = BIT(5),
	MANA_IB_FEATURE_MULTI_PORTS_SUPPORT = BIT(6),
};

struct mana_ib_query_adapter_caps_resp {
	struct gdma_resp_hdr hdr;
	u32 max_sq_id;
	u32 max_rq_id;
	u32 max_cq_id;
	u32 max_qp_count;
	u32 max_cq_count;
	u32 max_mr_count;
	u32 max_pd_count;
	u32 max_inbound_read_limit;
	u32 max_outbound_read_limit;
	u32 mw_count;
	u32 max_srq_count;
	u32 max_requester_sq_size;
	u32 max_responder_sq_size;
	u32 max_requester_rq_size;
	u32 max_responder_rq_size;
	u32 max_send_sge_count;
	u32 max_recv_sge_count;
	u32 max_inline_data_size;
	u64 feature_flags;
}; /* HW Data */

enum mana_ib_adapter_features_request {
	MANA_IB_FEATURE_CLIENT_ERROR_CQE_REQUEST = BIT(1),
}; /* HW Data */

struct mana_rnic_create_adapter_req {
	struct gdma_req_hdr hdr;
	u32 notify_eq_id;
	u32 reserved;
	u64 feature_flags;
}; /* HW Data */

struct mana_rnic_create_adapter_resp {
	struct gdma_resp_hdr hdr;
	mana_handle_t adapter;
}; /* HW Data */

struct mana_rnic_destroy_adapter_req {
	struct gdma_req_hdr hdr;
	mana_handle_t adapter;
}; /* HW Data */

struct mana_rnic_destroy_adapter_resp {
	struct gdma_resp_hdr hdr;
}; /* HW Data */

enum mana_ib_addr_op {
	ADDR_OP_ADD = 1,
	ADDR_OP_REMOVE = 2,
};

enum sgid_entry_type {
	SGID_TYPE_IPV4 = 1,
	SGID_TYPE_IPV6 = 2,
};

struct mana_rnic_config_addr_req {
	struct gdma_req_hdr hdr;
	mana_handle_t adapter;
	enum mana_ib_addr_op op;
	enum sgid_entry_type sgid_type;
	u8 ip_addr[16];
}; /* HW Data */

struct mana_rnic_config_addr_resp {
	struct gdma_resp_hdr hdr;
}; /* HW Data */

struct mana_rnic_config_mac_addr_req {
	struct gdma_req_hdr hdr;
	mana_handle_t adapter;
	enum mana_ib_addr_op op;
	u8 mac_addr[ETH_ALEN];
	u8 reserved[6];
}; /* HW Data */

struct mana_rnic_config_mac_addr_resp {
	struct gdma_resp_hdr hdr;
}; /* HW Data */

struct mana_rnic_create_cq_req {
	struct gdma_req_hdr hdr;
	mana_handle_t adapter;
	u64 gdma_region;
	u32 eq_id;
	u32 doorbell_page;
}; /* HW Data */

struct mana_rnic_create_cq_resp {
	struct gdma_resp_hdr hdr;
	mana_handle_t cq_handle;
	u32 cq_id;
	u32 reserved;
}; /* HW Data */

struct mana_rnic_destroy_cq_req {
	struct gdma_req_hdr hdr;
	mana_handle_t adapter;
	mana_handle_t cq_handle;
}; /* HW Data */

struct mana_rnic_destroy_cq_resp {
	struct gdma_resp_hdr hdr;
}; /* HW Data */

enum mana_rnic_create_rc_flags {
	MANA_RC_FLAG_NO_FMR = 2,
};

struct mana_rnic_create_qp_req {
	struct gdma_req_hdr hdr;
	mana_handle_t adapter;
	mana_handle_t pd_handle;
	mana_handle_t send_cq_handle;
	mana_handle_t recv_cq_handle;
	u64 dma_region[MANA_RC_QUEUE_TYPE_MAX];
	u64 deprecated[2];
	u64 flags;
	u32 doorbell_page;
	u32 max_send_wr;
	u32 max_recv_wr;
	u32 max_send_sge;
	u32 max_recv_sge;
	u32 reserved;
}; /* HW Data */

struct mana_rnic_create_qp_resp {
	struct gdma_resp_hdr hdr;
	mana_handle_t rc_qp_handle;
	u32 queue_ids[MANA_RC_QUEUE_TYPE_MAX];
	u32 reserved;
}; /* HW Data */

struct mana_rnic_destroy_rc_qp_req {
	struct gdma_req_hdr hdr;
	mana_handle_t adapter;
	mana_handle_t rc_qp_handle;
}; /* HW Data */

struct mana_rnic_destroy_rc_qp_resp {
	struct gdma_resp_hdr hdr;
}; /* HW Data */

struct mana_rnic_create_udqp_req {
	struct gdma_req_hdr hdr;
	mana_handle_t adapter;
	mana_handle_t pd_handle;
	mana_handle_t send_cq_handle;
	mana_handle_t recv_cq_handle;
	u64 dma_region[MANA_UD_QUEUE_TYPE_MAX];
	u32 qp_type;
	u32 doorbell_page;
	u32 max_send_wr;
	u32 max_recv_wr;
	u32 max_send_sge;
	u32 max_recv_sge;
}; /* HW Data */

struct mana_rnic_create_udqp_resp {
	struct gdma_resp_hdr hdr;
	mana_handle_t qp_handle;
	u32 queue_ids[MANA_UD_QUEUE_TYPE_MAX];
}; /* HW Data */

struct mana_rnic_destroy_udqp_req {
	struct gdma_req_hdr hdr;
	mana_handle_t adapter;
	mana_handle_t qp_handle;
}; /* HW Data */

struct mana_rnic_destroy_udqp_resp {
	struct gdma_resp_hdr hdr;
}; /* HW Data */

struct mana_ib_ah_attr {
	u8 src_addr[16];
	u8 dest_addr[16];
	u8 src_mac[ETH_ALEN];
	u8 dest_mac[ETH_ALEN];
	u8 src_addr_type;
	u8 dest_addr_type;
	u8 hop_limit;
	u8 traffic_class;
	u16 src_port;
	u16 dest_port;
	u32 reserved;
};

struct mana_rnic_set_qp_state_req {
	struct gdma_req_hdr hdr;
	mana_handle_t adapter;
	mana_handle_t qp_handle;
	u64 attr_mask;
	u32 qp_state;
	u32 path_mtu;
	u32 rq_psn;
	u32 sq_psn;
	u32 dest_qpn;
	u32 max_dest_rd_atomic;
	u32 retry_cnt;
	u32 rnr_retry;
	u32 min_rnr_timer;
	u32 reserved;
	struct mana_ib_ah_attr ah_attr;
}; /* HW Data */

struct mana_rnic_set_qp_state_resp {
	struct gdma_resp_hdr hdr;
}; /* HW Data */

enum WQE_OPCODE_TYPES {
	WQE_TYPE_UD_SEND = 0,
	WQE_TYPE_UD_RECV = 8,
}; /* HW DATA */

struct rdma_send_oob {
	u32 wqe_type : 5;
	u32 fence : 1;
	u32 signaled : 1;
	u32 solicited : 1;
	u32 psn : 24;

	u32 ssn_or_rqpn : 24;
	u32 reserved1 : 8;
	union {
		struct {
			u32 remote_qkey;
			u32 immediate;
			u32 reserved1;
			u32 reserved2;
		} ud_send;
	};
}; /* HW DATA */

struct mana_rdma_cqe {
	union {
		struct {
			u8 cqe_type;
			u8 data[GDMA_COMP_DATA_SIZE - 1];
		};
		struct {
			u32 cqe_type : 8;
			u32 vendor_error : 9;
			u32 reserved1 : 15;
			u32 sge_offset : 5;
			u32 tx_wqe_offset : 27;
		} ud_send;
		struct {
			u32 cqe_type : 8;
			u32 reserved1 : 24;
			u32 msg_len;
			u32 src_qpn : 24;
			u32 reserved2 : 8;
			u32 imm_data;
			u32 rx_wqe_offset;
		} ud_recv;
	};
}; /* HW DATA */
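
/*
 * Every view of the completion starts with the same cqe_type byte, which
 * selects the union member that applies; GDMA_COMP_DATA_SIZE bounds the
 * whole completion data area.
 */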

struct mana_rnic_query_vf_cntrs_req {
	struct gdma_req_hdr hdr;
	mana_handle_t adapter;
}; /* HW Data */

struct mana_rnic_query_vf_cntrs_resp {
	struct gdma_resp_hdr hdr;
	u64 requester_timeout;
	u64 requester_oos_nak;
	u64 requester_rnr_nak;
	u64 responder_rnr_nak;
	u64 responder_oos;
	u64 responder_dup_request;
	u64 requester_implicit_nak;
	u64 requester_readresp_psn_mismatch;
	u64 nak_inv_req;
	u64 nak_access_err;
	u64 nak_opp_err;
	u64 nak_inv_read;
	u64 responder_local_len_err;
	u64 requestor_local_prot_err;
	u64 responder_rem_access_err;
	u64 responder_local_qp_err;
	u64 responder_malformed_wqe;
	u64 general_hw_err;
	u64 requester_rnr_nak_retries_exceeded;
	u64 requester_retries_exceeded;
	u64 total_fatal_err;
	u64 received_cnps;
	u64 num_qps_congested;
	u64 rate_inc_events;
	u64 num_qps_recovered;
	u64 current_rate;
	u64 dup_rx_req;
	u64 tx_bytes;
	u64 rx_bytes;
	u64 rx_send_req;
	u64 rx_write_req;
	u64 rx_read_req;
	u64 tx_pkt;
	u64 rx_pkt;
}; /* HW Data */

struct mana_rnic_query_device_cntrs_req {
	struct gdma_req_hdr hdr;
	mana_handle_t adapter;
}; /* HW Data */

struct mana_rnic_query_device_cntrs_resp {
	struct gdma_resp_hdr hdr;
	u32 sent_cnps;
	u32 received_ecns;
	u32 reserved1;
	u32 received_cnp_count;
	u32 qp_congested_events;
	u32 qp_recovered_events;
	u32 rate_inc_events;
	u32 reserved2;
}; /* HW Data */

static inline struct gdma_context *mdev_to_gc(struct mana_ib_dev *mdev)
{
	return mdev->gdma_dev->gdma_context;
}

static inline struct mana_ib_qp *mana_get_qp_ref(struct mana_ib_dev *mdev,
						 u32 qid, bool is_sq)
{
	struct mana_ib_qp *qp;
	unsigned long flag;

	if (is_sq)
		qid |= MANA_SENDQ_MASK;

	xa_lock_irqsave(&mdev->qp_table_wq, flag);
	qp = xa_load(&mdev->qp_table_wq, qid);
	if (qp)
		refcount_inc(&qp->refcount);
	xa_unlock_irqrestore(&mdev->qp_table_wq, flag);
	return qp;
}

static inline void mana_put_qp_ref(struct mana_ib_qp *qp)
{
	if (refcount_dec_and_test(&qp->refcount))
		complete(&qp->free);
}
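
/*
 * A minimal usage sketch (the surrounding handler is illustrative):
 * resolve a queue id to its QP, use it, then drop the reference so a
 * concurrent destroy can finish once qp->free completes.
 *
 *	qp = mana_get_qp_ref(mdev, qid, is_sq);
 *	if (!qp)
 *		return;
 *	... handle the completion or error event ...
 *	mana_put_qp_ref(qp);
 */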

static inline bool mana_ib_is_rnic(struct mana_ib_dev *mdev)
{
	return mdev->gdma_dev->dev_id.type == GDMA_DEVICE_MANA_IB;
}

static inline struct net_device *mana_ib_get_netdev(struct ib_device *ibdev, u32 port)
{
	struct mana_ib_dev *mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
	struct gdma_context *gc = mdev_to_gc(mdev);
	struct mana_context *mc = gc->mana.driver_data;

	if (port < 1 || port > mc->num_ports)
		return NULL;
	return mc->ports[port - 1];
}

static inline void copy_in_reverse(u8 *dst, const u8 *src, u32 size)
{
	u32 i;

	for (i = 0; i < size; i++)
		dst[size - 1 - i] = src[i];
}
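
/*
 * Example (illustrative): reversing the 16 raw bytes of a GID yields the
 * byte order the adapter expects when an AV is written:
 *
 *	copy_in_reverse(av->dest_ip, rdma_ah_read_grh(ah_attr)->dgid.raw, 16);
 */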

int mana_ib_install_cq_cb(struct mana_ib_dev *mdev, struct mana_ib_cq *cq);
void mana_ib_remove_cq_cb(struct mana_ib_dev *mdev, struct mana_ib_cq *cq);

int mana_ib_create_zero_offset_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
					  mana_handle_t *gdma_region);

int mana_ib_create_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
			      mana_handle_t *gdma_region, u64 virt);

int mana_ib_gd_destroy_dma_region(struct mana_ib_dev *dev,
				  mana_handle_t gdma_region);

int mana_ib_create_kernel_queue(struct mana_ib_dev *mdev, u32 size, enum gdma_queue_type type,
				struct mana_ib_queue *queue);
int mana_ib_create_queue(struct mana_ib_dev *mdev, u64 addr, u32 size,
			 struct mana_ib_queue *queue);
void mana_ib_destroy_queue(struct mana_ib_dev *mdev, struct mana_ib_queue *queue);

struct ib_wq *mana_ib_create_wq(struct ib_pd *pd,
				struct ib_wq_init_attr *init_attr,
				struct ib_udata *udata);

int mana_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
		      u32 wq_attr_mask, struct ib_udata *udata);

int mana_ib_destroy_wq(struct ib_wq *ibwq, struct ib_udata *udata);

int mana_ib_create_rwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_table,
				 struct ib_rwq_ind_table_init_attr *init_attr,
				 struct ib_udata *udata);

int mana_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_tbl);

struct ib_mr *mana_ib_get_dma_mr(struct ib_pd *ibpd, int access_flags);

struct ib_mr *mana_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				  u64 iova, int access_flags,
				  struct ib_dmah *dmah,
				  struct ib_udata *udata);

int mana_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata);

int mana_ib_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *qp_init_attr,
		      struct ib_udata *udata);

int mana_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata);

int mana_ib_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata);

int mana_ib_cfg_vport(struct mana_ib_dev *dev, u32 port_id,
		      struct mana_ib_pd *pd, u32 doorbell_id);
void mana_ib_uncfg_vport(struct mana_ib_dev *dev, struct mana_ib_pd *pd,
			 u32 port);

int mana_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
		      struct uverbs_attr_bundle *attrs);

int mana_ib_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);

int mana_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata);
int mana_ib_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata);

int mana_ib_alloc_ucontext(struct ib_ucontext *ibcontext,
			   struct ib_udata *udata);
void mana_ib_dealloc_ucontext(struct ib_ucontext *ibcontext);

int mana_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma);

int mana_ib_get_port_immutable(struct ib_device *ibdev, u32 port_num,
			       struct ib_port_immutable *immutable);
int mana_ib_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
			 struct ib_udata *uhw);
int mana_ib_query_port(struct ib_device *ibdev, u32 port,
		       struct ib_port_attr *props);
int mana_ib_query_gid(struct ib_device *ibdev, u32 port, int index,
		      union ib_gid *gid);

void mana_ib_disassociate_ucontext(struct ib_ucontext *ibcontext);

int mana_ib_gd_query_adapter_caps(struct mana_ib_dev *mdev);
int mana_eth_query_adapter_caps(struct mana_ib_dev *mdev);

int mana_ib_create_eqs(struct mana_ib_dev *mdev);

void mana_ib_destroy_eqs(struct mana_ib_dev *mdev);

int mana_ib_gd_create_rnic_adapter(struct mana_ib_dev *mdev);

int mana_ib_gd_destroy_rnic_adapter(struct mana_ib_dev *mdev);

int mana_ib_query_pkey(struct ib_device *ibdev, u32 port, u16 index, u16 *pkey);

enum rdma_link_layer mana_ib_get_link_layer(struct ib_device *device, u32 port_num);

int mana_ib_gd_add_gid(const struct ib_gid_attr *attr, void **context);

int mana_ib_gd_del_gid(const struct ib_gid_attr *attr, void **context);

int mana_ib_gd_config_mac(struct mana_ib_dev *mdev, enum mana_ib_addr_op op, u8 *mac);

int mana_ib_gd_create_cq(struct mana_ib_dev *mdev, struct mana_ib_cq *cq, u32 doorbell);

int mana_ib_gd_destroy_cq(struct mana_ib_dev *mdev, struct mana_ib_cq *cq);

int mana_ib_gd_create_rc_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp,
			    struct ib_qp_init_attr *attr, u32 doorbell, u64 flags);
int mana_ib_gd_destroy_rc_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp);

int mana_ib_gd_create_ud_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp,
			    struct ib_qp_init_attr *attr, u32 doorbell, u32 type);
int mana_ib_gd_destroy_ud_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp);

int mana_ib_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
		      struct ib_udata *udata);
int mana_ib_destroy_ah(struct ib_ah *ah, u32 flags);

int mana_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
		      const struct ib_recv_wr **bad_wr);
int mana_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
		      const struct ib_send_wr **bad_wr);

int mana_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
int mana_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);

struct ib_mr *mana_ib_reg_user_mr_dmabuf(struct ib_pd *ibpd, u64 start, u64 length,
					 u64 iova, int fd, int mr_access_flags,
					 struct ib_dmah *dmah,
					 struct uverbs_attr_bundle *attrs);
#endif