/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/* Copyright (c) 2021, Microsoft Corporation. */

#ifndef _GDMA_H
#define _GDMA_H

#include <linux/dma-mapping.h>
#include <linux/netdevice.h>

#include "shm_channel.h"

#define GDMA_STATUS_MORE_ENTRIES 0x00000105

/* Structures labeled with "HW DATA" are exchanged with the hardware. All of
 * them are naturally aligned and hence don't need __packed.
 */
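
/* Illustrative sketch (not part of this header): since the "HW DATA"
 * structures rely on natural alignment instead of __packed, one way to guard
 * the wire layout is a compile-time size check in an init path, e.g.:
 *
 *	BUILD_BUG_ON(sizeof(struct gdma_msg_hdr) != 16);
 *	BUILD_BUG_ON(sizeof(union gdma_doorbell_entry) != 8);
 */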

enum gdma_request_type {
	GDMA_VERIFY_VF_DRIVER_VERSION = 1,
	GDMA_QUERY_MAX_RESOURCES = 2,
	GDMA_LIST_DEVICES = 3,
	GDMA_REGISTER_DEVICE = 4,
	GDMA_DEREGISTER_DEVICE = 5,
	GDMA_GENERATE_TEST_EQE = 10,
	GDMA_CREATE_QUEUE = 12,
	GDMA_DISABLE_QUEUE = 13,
	GDMA_ALLOCATE_RESOURCE_RANGE = 22,
	GDMA_DESTROY_RESOURCE_RANGE = 24,
	GDMA_CREATE_DMA_REGION = 25,
	GDMA_DMA_REGION_ADD_PAGES = 26,
	GDMA_DESTROY_DMA_REGION = 27,
	GDMA_CREATE_PD = 29,
	GDMA_DESTROY_PD = 30,
	GDMA_CREATE_MR = 31,
	GDMA_DESTROY_MR = 32,
};

#define GDMA_RESOURCE_DOORBELL_PAGE 27

enum gdma_queue_type {
	GDMA_INVALID_QUEUE,
	GDMA_SQ,
	GDMA_RQ,
	GDMA_CQ,
	GDMA_EQ,
};

enum gdma_work_request_flags {
	GDMA_WR_NONE = 0,
	GDMA_WR_OOB_IN_SGL = BIT(0),
	GDMA_WR_PAD_BY_SGE0 = BIT(1),
};

enum gdma_eqe_type {
	GDMA_EQE_COMPLETION = 3,
	GDMA_EQE_TEST_EVENT = 64,
	GDMA_EQE_HWC_INIT_EQ_ID_DB = 129,
	GDMA_EQE_HWC_INIT_DATA = 130,
	GDMA_EQE_HWC_INIT_DONE = 131,
};

enum {
	GDMA_DEVICE_NONE = 0,
	GDMA_DEVICE_HWC = 1,
	GDMA_DEVICE_MANA = 2,
};

struct gdma_resource {
	/* Protect the bitmap */
	spinlock_t lock;

	/* The bitmap size in bits. */
	u32 size;

	/* The bitmap tracks the resources. */
	unsigned long *map;
};

union gdma_doorbell_entry {
	u64 as_uint64;

	struct {
		u64 id : 24;
		u64 reserved : 8;
		u64 tail_ptr : 31;
		u64 arm : 1;
	} cq;

	struct {
		u64 id : 24;
		u64 wqe_cnt : 8;
		u64 tail_ptr : 32;
	} rq;

	struct {
		u64 id : 24;
		u64 reserved : 8;
		u64 tail_ptr : 32;
	} sq;

	struct {
		u64 id : 16;
		u64 reserved : 16;
		u64 tail_ptr : 31;
		u64 arm : 1;
	} eq;
}; /* HW DATA */
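
/* Illustrative sketch (assumptions, not driver code): a completion queue can
 * be re-armed by writing a value built from this union to the CQ doorbell
 * register in the mapped doorbell page; the register offset and the tail
 * units below are assumptions, not definitions from this header:
 *
 *	union gdma_doorbell_entry e = {};
 *
 *	e.cq.id = cq_id;
 *	e.cq.tail_ptr = tail;
 *	e.cq.arm = SET_ARM_BIT;
 *	writeq(e.as_uint64, db_page_va + cq_db_offset);
 */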

struct gdma_msg_hdr {
	u32 hdr_type;
	u32 msg_type;
	u16 msg_version;
	u16 hwc_msg_id;
	u32 msg_size;
}; /* HW DATA */

struct gdma_dev_id {
	union {
		struct {
			u16 type;
			u16 instance;
		};

		u32 as_uint32;
	};
}; /* HW DATA */

struct gdma_req_hdr {
	struct gdma_msg_hdr req;
	struct gdma_msg_hdr resp; /* The expected response */
	struct gdma_dev_id dev_id;
	u32 activity_id;
}; /* HW DATA */

struct gdma_resp_hdr {
	struct gdma_msg_hdr response;
	struct gdma_dev_id dev_id;
	u32 activity_id;
	u32 status;
	u32 reserved;
}; /* HW DATA */

struct gdma_general_req {
	struct gdma_req_hdr hdr;
}; /* HW DATA */

#define GDMA_MESSAGE_V1 1

struct gdma_general_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

#define GDMA_STANDARD_HEADER_TYPE 0

static inline void mana_gd_init_req_hdr(struct gdma_req_hdr *hdr, u32 code,
					u32 req_size, u32 resp_size)
{
	hdr->req.hdr_type = GDMA_STANDARD_HEADER_TYPE;
	hdr->req.msg_type = code;
	hdr->req.msg_version = GDMA_MESSAGE_V1;
	hdr->req.msg_size = req_size;

	hdr->resp.hdr_type = GDMA_STANDARD_HEADER_TYPE;
	hdr->resp.msg_type = code;
	hdr->resp.msg_version = GDMA_MESSAGE_V1;
	hdr->resp.msg_size = resp_size;
}

/* The 16-byte struct is part of the GDMA work queue entry (WQE). */
struct gdma_sge {
	u64 address;
	u32 mem_key;
	u32 size;
}; /* HW DATA */

struct gdma_wqe_request {
	struct gdma_sge *sgl;
	u32 num_sge;

	u32 inline_oob_size;
	const void *inline_oob_data;

	u32 flags;
	u32 client_data_unit;
};

enum gdma_page_type {
	GDMA_PAGE_TYPE_4K,
};

#define GDMA_INVALID_DMA_REGION 0

struct gdma_mem_info {
	struct device *dev;

	dma_addr_t dma_handle;
	void *virt_addr;
	u64 length;

	/* Allocated by the PF driver */
	u64 dma_region_handle;
};

#define REGISTER_ATB_MST_MKEY_LOWER_SIZE 8

struct gdma_dev {
	struct gdma_context *gdma_context;

	struct gdma_dev_id dev_id;

	u32 pdid;
	u32 doorbell;
	u32 gpa_mkey;

	/* GDMA driver specific pointer */
	void *driver_data;

	struct auxiliary_device *adev;
};

#define MINIMUM_SUPPORTED_PAGE_SIZE PAGE_SIZE

#define GDMA_CQE_SIZE 64
#define GDMA_EQE_SIZE 16
#define GDMA_MAX_SQE_SIZE 512
#define GDMA_MAX_RQE_SIZE 256

#define GDMA_COMP_DATA_SIZE 0x3C

#define GDMA_EVENT_DATA_SIZE 0xC

/* The WQE size must be a multiple of the Basic Unit, which is 32 bytes. */
#define GDMA_WQE_BU_SIZE 32

#define INVALID_PDID		UINT_MAX
#define INVALID_DOORBELL	UINT_MAX
#define INVALID_MEM_KEY		UINT_MAX
#define INVALID_QUEUE_ID	UINT_MAX
#define INVALID_PCI_MSIX_INDEX	UINT_MAX

struct gdma_comp {
	u32 cqe_data[GDMA_COMP_DATA_SIZE / 4];
	u32 wq_num;
	bool is_sq;
};

struct gdma_event {
	u32 details[GDMA_EVENT_DATA_SIZE / 4];
	u8 type;
};

struct gdma_queue;

struct mana_eq {
	struct gdma_queue *eq;
};

typedef void gdma_eq_callback(void *context, struct gdma_queue *q,
			      struct gdma_event *e);

typedef void gdma_cq_callback(void *context, struct gdma_queue *q);

/* The 'head' is the producer index. For SQ/RQ, when the driver posts a WQE
 * (Note: the WQE size must be a multiple of the 32-byte Basic Unit), the
 * driver increases the 'head' in BUs rather than in bytes, and notifies
 * the HW of the updated head. For EQ/CQ, the driver uses the 'head' to track
 * the HW head, and increases the 'head' by 1 for every processed EQE/CQE.
 *
 * The 'tail' is the consumer index for SQ/RQ. After the CQE of the SQ/RQ is
 * processed, the driver increases the 'tail' to indicate that WQEs have
 * been consumed by the HW, so the driver can post new WQEs into the SQ/RQ.
 *
 * The driver doesn't use the 'tail' for EQ/CQ, because the driver ensures
 * that the EQ/CQ is big enough so they can't overflow, and the driver uses
 * the owner bits mechanism to detect if the queue has become empty.
 */
struct gdma_queue {
	struct gdma_dev *gdma_dev;

	enum gdma_queue_type type;
	u32 id;

	struct gdma_mem_info mem_info;

	void *queue_mem_ptr;
	u32 queue_size;

	bool monitor_avl_buf;

	u32 head;
	u32 tail;

	/* Extra fields specific to EQ/CQ. */
	union {
		struct {
			bool disable_needed;

			gdma_eq_callback *callback;
			void *context;

			unsigned int msix_index;

			u32 log2_throttle_limit;
		} eq;

		struct {
			gdma_cq_callback *callback;
			void *context;

			struct gdma_queue *parent; /* For CQ/EQ relationship */
		} cq;
	};
};
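
/* Illustrative sketch (assumed driver-side flow, not defined here) of the
 * head/tail scheme described above struct gdma_queue:
 *
 *	// after writing a WQE of wqe_size bytes into an SQ/RQ:
 *	wq->head += wqe_size / GDMA_WQE_BU_SIZE;
 *	mana_gd_wq_ring_doorbell(gc, wq);
 *
 *	// later, once the matching CQE has been processed:
 *	wq->tail += wqe_info.wqe_size_in_bu;
 */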

struct gdma_queue_spec {
	enum gdma_queue_type type;
	bool monitor_avl_buf;
	unsigned int queue_size;

	/* Extra fields specific to EQ/CQ. */
	union {
		struct {
			gdma_eq_callback *callback;
			void *context;

			unsigned long log2_throttle_limit;
		} eq;

		struct {
			gdma_cq_callback *callback;
			void *context;

			struct gdma_queue *parent_eq;
		} cq;
	};
};

#define MANA_IRQ_NAME_SZ 32

struct gdma_irq_context {
	void (*handler)(void *arg);
	void *arg;
	char name[MANA_IRQ_NAME_SZ];
};

struct gdma_context {
	struct device *dev;

	/* Per-vPort max number of queues */
	unsigned int max_num_queues;
	unsigned int max_num_msix;
	unsigned int num_msix_usable;
	struct gdma_resource msix_resource;
	struct gdma_irq_context *irq_contexts;

	/* This maps a CQ index to the queue structure. */
	unsigned int max_num_cqs;
	struct gdma_queue **cq_table;

	/* Protect eq_test_event and test_event_eq_id */
	struct mutex eq_test_event_mutex;
	struct completion eq_test_event;
	u32 test_event_eq_id;

	bool is_pf;
	phys_addr_t bar0_pa;
	void __iomem *bar0_va;
	void __iomem *shm_base;
	void __iomem *db_page_base;
	phys_addr_t phys_db_page_base;
	u32 db_page_size;
	int numa_node;

	/* Shared memory channel (used to bootstrap HWC) */
	struct shm_channel shm_channel;

	/* Hardware communication channel (HWC) */
	struct gdma_dev hwc;

	/* Azure network adapter */
	struct gdma_dev mana;
};

#define MAX_NUM_GDMA_DEVICES 4

static inline bool mana_gd_is_mana(struct gdma_dev *gd)
{
	return gd->dev_id.type == GDMA_DEVICE_MANA;
}

static inline bool mana_gd_is_hwc(struct gdma_dev *gd)
{
	return gd->dev_id.type == GDMA_DEVICE_HWC;
}

u8 *mana_gd_get_wqe_ptr(const struct gdma_queue *wq, u32 wqe_offset);
u32 mana_gd_wq_avail_space(struct gdma_queue *wq);

int mana_gd_test_eq(struct gdma_context *gc, struct gdma_queue *eq);

int mana_gd_create_hwc_queue(struct gdma_dev *gd,
			     const struct gdma_queue_spec *spec,
			     struct gdma_queue **queue_ptr);

int mana_gd_create_mana_eq(struct gdma_dev *gd,
			   const struct gdma_queue_spec *spec,
			   struct gdma_queue **queue_ptr);

int mana_gd_create_mana_wq_cq(struct gdma_dev *gd,
			      const struct gdma_queue_spec *spec,
			      struct gdma_queue **queue_ptr);

void mana_gd_destroy_queue(struct gdma_context *gc, struct gdma_queue *queue);

int mana_gd_poll_cq(struct gdma_queue *cq, struct gdma_comp *comp, int num_cqe);

void mana_gd_ring_cq(struct gdma_queue *cq, u8 arm_bit);

struct gdma_wqe {
	u32 reserved	:24;
	u32 last_vbytes	:8;

	union {
		u32 flags;

		struct {
			u32 num_sge		:8;
			u32 inline_oob_size_div4:3;
			u32 client_oob_in_sgl	:1;
			u32 reserved1		:4;
			u32 client_data_unit	:14;
			u32 reserved2		:2;
		};
	};
}; /* HW DATA */

#define INLINE_OOB_SMALL_SIZE 8
#define INLINE_OOB_LARGE_SIZE 24

#define MAX_TX_WQE_SIZE 512
#define MAX_RX_WQE_SIZE 256

#define MAX_TX_WQE_SGL_ENTRIES	((GDMA_MAX_SQE_SIZE -			\
			sizeof(struct gdma_sge) - INLINE_OOB_SMALL_SIZE) / \
			sizeof(struct gdma_sge))

#define MAX_RX_WQE_SGL_ENTRIES	((GDMA_MAX_RQE_SIZE -			\
			sizeof(struct gdma_sge)) / sizeof(struct gdma_sge))
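
/* With sizeof(struct gdma_sge) == 16, these evaluate to
 * (512 - 16 - 8) / 16 = 30 SGEs per TX WQE and (256 - 16) / 16 = 15 SGEs
 * per RX WQE.
 */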

struct gdma_cqe {
	u32 cqe_data[GDMA_COMP_DATA_SIZE / 4];

	union {
		u32 as_uint32;

		struct {
			u32 wq_num : 24;
			u32 is_sq : 1;
			u32 reserved : 4;
			u32 owner_bits : 3;
		};
	} cqe_info;
}; /* HW DATA */

#define GDMA_CQE_OWNER_BITS 3

#define GDMA_CQE_OWNER_MASK ((1 << GDMA_CQE_OWNER_BITS) - 1)
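
/* Illustrative sketch (one possible consumer-side check; the actual polling
 * logic lives in the .c code, not in this header): with a CQ of num_cqe
 * entries, the owner bits stamped into a CQE can be compared against the
 * value expected for the current pass over the ring to decide whether the
 * hardware has produced that entry yet:
 *
 *	u32 new_bits = (cq->head / num_cqe) & GDMA_CQE_OWNER_MASK;
 *	u32 old_bits = (new_bits - 1) & GDMA_CQE_OWNER_MASK;
 *	u32 seen = READ_ONCE(cqe->cqe_info.owner_bits);
 *
 *	if (seen == old_bits)
 *		return 0;	// CQE not written by HW yet
 */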

#define SET_ARM_BIT 1

#define GDMA_EQE_OWNER_BITS 3

union gdma_eqe_info {
	u32 as_uint32;

	struct {
		u32 type : 8;
		u32 reserved1 : 8;
		u32 client_id : 2;
		u32 reserved2 : 11;
		u32 owner_bits : 3;
	};
}; /* HW DATA */

#define GDMA_EQE_OWNER_MASK ((1 << GDMA_EQE_OWNER_BITS) - 1)
#define INITIALIZED_OWNER_BIT(log2_num_entries) (1UL << (log2_num_entries))

struct gdma_eqe {
	u32 details[GDMA_EVENT_DATA_SIZE / 4];
	u32 eqe_info;
}; /* HW DATA */

#define GDMA_REG_DB_PAGE_OFFSET	8
#define GDMA_REG_DB_PAGE_SIZE	0x10
#define GDMA_REG_SHM_OFFSET	0x18

#define GDMA_PF_REG_DB_PAGE_SIZE	0xD0
#define GDMA_PF_REG_DB_PAGE_OFF		0xC8
#define GDMA_PF_REG_SHM_OFF		0x70

#define GDMA_SRIOV_REG_CFG_BASE_OFF	0x108

#define MANA_PF_DEVICE_ID 0x00B9
#define MANA_VF_DEVICE_ID 0x00BA

struct gdma_posted_wqe_info {
	u32 wqe_size_in_bu;
};

/* GDMA_GENERATE_TEST_EQE */
struct gdma_generate_test_event_req {
	struct gdma_req_hdr hdr;
	u32 queue_index;
}; /* HW DATA */

/* GDMA_VERIFY_VF_DRIVER_VERSION */
enum {
	GDMA_PROTOCOL_V1 = 1,
	GDMA_PROTOCOL_FIRST = GDMA_PROTOCOL_V1,
	GDMA_PROTOCOL_LAST = GDMA_PROTOCOL_V1,
};

#define GDMA_DRV_CAP_FLAG_1_EQ_SHARING_MULTI_VPORT BIT(0)

/* Advertise to the NIC firmware: the NAPI work_done variable race is fixed,
 * so the driver is able to reliably support features like busy_poll.
 */
#define GDMA_DRV_CAP_FLAG_1_NAPI_WKDONE_FIX BIT(2)

#define GDMA_DRV_CAP_FLAGS1 \
	(GDMA_DRV_CAP_FLAG_1_EQ_SHARING_MULTI_VPORT | \
	 GDMA_DRV_CAP_FLAG_1_NAPI_WKDONE_FIX)

#define GDMA_DRV_CAP_FLAGS2 0

#define GDMA_DRV_CAP_FLAGS3 0

#define GDMA_DRV_CAP_FLAGS4 0

struct gdma_verify_ver_req {
	struct gdma_req_hdr hdr;

	/* Mandatory fields required for protocol establishment */
	u64 protocol_ver_min;
	u64 protocol_ver_max;

	/* Gdma Driver Capability Flags */
	u64 gd_drv_cap_flags1;
	u64 gd_drv_cap_flags2;
	u64 gd_drv_cap_flags3;
	u64 gd_drv_cap_flags4;

	/* Advisory fields */
	u64 drv_ver;
	u32 os_type; /* Linux = 0x10; Windows = 0x20; Other = 0x30 */
	u32 reserved;
	u32 os_ver_major;
	u32 os_ver_minor;
	u32 os_ver_build;
	u32 os_ver_platform;
	u64 reserved_2;
	u8 os_ver_str1[128];
	u8 os_ver_str2[128];
	u8 os_ver_str3[128];
	u8 os_ver_str4[128];
}; /* HW DATA */

struct gdma_verify_ver_resp {
	struct gdma_resp_hdr hdr;
	u64 gdma_protocol_ver;
	u64 pf_cap_flags1;
	u64 pf_cap_flags2;
	u64 pf_cap_flags3;
	u64 pf_cap_flags4;
}; /* HW DATA */

/* GDMA_QUERY_MAX_RESOURCES */
struct gdma_query_max_resources_resp {
	struct gdma_resp_hdr hdr;
	u32 status;
	u32 max_sq;
	u32 max_rq;
	u32 max_cq;
	u32 max_eq;
	u32 max_db;
	u32 max_mst;
	u32 max_cq_mod_ctx;
	u32 max_mod_cq;
	u32 max_msix;
}; /* HW DATA */

/* GDMA_LIST_DEVICES */
struct gdma_list_devices_resp {
	struct gdma_resp_hdr hdr;
	u32 num_of_devs;
	u32 reserved;
	struct gdma_dev_id devs[64];
}; /* HW DATA */

/* GDMA_REGISTER_DEVICE */
struct gdma_register_device_resp {
	struct gdma_resp_hdr hdr;
	u32 pdid;
	u32 gpa_mkey;
	u32 db_id;
}; /* HW DATA */

struct gdma_allocate_resource_range_req {
	struct gdma_req_hdr hdr;
	u32 resource_type;
	u32 num_resources;
	u32 alignment;
	u32 allocated_resources;
};

struct gdma_allocate_resource_range_resp {
	struct gdma_resp_hdr hdr;
	u32 allocated_resources;
};

struct gdma_destroy_resource_range_req {
	struct gdma_req_hdr hdr;
	u32 resource_type;
	u32 num_resources;
	u32 allocated_resources;
};

/* GDMA_CREATE_QUEUE */
struct gdma_create_queue_req {
	struct gdma_req_hdr hdr;
	u32 type;
	u32 reserved1;
	u32 pdid;
	u32 doolbell_id;
	u64 gdma_region;
	u32 reserved2;
	u32 queue_size;
	u32 log2_throttle_limit;
	u32 eq_pci_msix_index;
	u32 cq_mod_ctx_id;
	u32 cq_parent_eq_id;
	u8  rq_drop_on_overrun;
	u8  rq_err_on_wqe_overflow;
	u8  rq_chain_rec_wqes;
	u8  sq_hw_db;
	u32 reserved3;
}; /* HW DATA */

struct gdma_create_queue_resp {
	struct gdma_resp_hdr hdr;
	u32 queue_index;
}; /* HW DATA */

/* GDMA_DISABLE_QUEUE */
struct gdma_disable_queue_req {
	struct gdma_req_hdr hdr;
	u32 type;
	u32 queue_index;
	u32 alloc_res_id_on_creation;
}; /* HW DATA */

enum atb_page_size {
	ATB_PAGE_SIZE_4K,
	ATB_PAGE_SIZE_8K,
	ATB_PAGE_SIZE_16K,
	ATB_PAGE_SIZE_32K,
	ATB_PAGE_SIZE_64K,
	ATB_PAGE_SIZE_128K,
	ATB_PAGE_SIZE_256K,
	ATB_PAGE_SIZE_512K,
	ATB_PAGE_SIZE_1M,
	ATB_PAGE_SIZE_2M,
	ATB_PAGE_SIZE_MAX,
};

enum gdma_mr_access_flags {
	GDMA_ACCESS_FLAG_LOCAL_READ = BIT_ULL(0),
	GDMA_ACCESS_FLAG_LOCAL_WRITE = BIT_ULL(1),
	GDMA_ACCESS_FLAG_REMOTE_READ = BIT_ULL(2),
	GDMA_ACCESS_FLAG_REMOTE_WRITE = BIT_ULL(3),
	GDMA_ACCESS_FLAG_REMOTE_ATOMIC = BIT_ULL(4),
};

/* GDMA_CREATE_DMA_REGION */
struct gdma_create_dma_region_req {
	struct gdma_req_hdr hdr;

	/* The total size of the DMA region */
	u64 length;

	/* The offset in the first page */
	u32 offset_in_page;

	/* enum gdma_page_type */
	u32 gdma_page_type;

	/* The total number of pages */
	u32 page_count;

	/* If page_addr_list_len is smaller than page_count,
	 * the remaining page addresses will be added via the
	 * message GDMA_DMA_REGION_ADD_PAGES.
	 */
	u32 page_addr_list_len;
	u64 page_addr_list[];
}; /* HW DATA */
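
/* Illustrative sketch (assumed flow, simplified; local variables are
 * hypothetical): when not all page addresses fit into one message, the first
 * request carries page_addr_list_len < page_count and the remaining addresses
 * follow via GDMA_DMA_REGION_ADD_PAGES against the returned handle:
 *
 *	req_size = struct_size(req, page_addr_list, first_msg_pages);
 *	req = kzalloc(req_size, GFP_KERNEL);
 *	mana_gd_init_req_hdr(&req->hdr, GDMA_CREATE_DMA_REGION, req_size,
 *			     sizeof(resp));
 *	req->length = length;
 *	req->gdma_page_type = GDMA_PAGE_TYPE_4K;
 *	req->page_count = num_pages;
 *	req->page_addr_list_len = first_msg_pages;
 *	// fill req->page_addr_list[], then:
 *	err = mana_gd_send_request(gc, req_size, req, sizeof(resp), &resp);
 *	// on success, resp.dma_region_handle identifies the new region
 */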

struct gdma_create_dma_region_resp {
	struct gdma_resp_hdr hdr;
	u64 dma_region_handle;
}; /* HW DATA */

/* GDMA_DMA_REGION_ADD_PAGES */
struct gdma_dma_region_add_pages_req {
	struct gdma_req_hdr hdr;

	u64 dma_region_handle;

	u32 page_addr_list_len;
	u32 reserved3;

	u64 page_addr_list[];
}; /* HW DATA */

/* GDMA_DESTROY_DMA_REGION */
struct gdma_destroy_dma_region_req {
	struct gdma_req_hdr hdr;

	u64 dma_region_handle;
}; /* HW DATA */

enum gdma_pd_flags {
	GDMA_PD_FLAG_INVALID = 0,
};

struct gdma_create_pd_req {
	struct gdma_req_hdr hdr;
	enum gdma_pd_flags flags;
	u32 reserved;
}; /* HW DATA */

struct gdma_create_pd_resp {
	struct gdma_resp_hdr hdr;
	u64 pd_handle;
	u32 pd_id;
	u32 reserved;
}; /* HW DATA */

struct gdma_destroy_pd_req {
	struct gdma_req_hdr hdr;
	u64 pd_handle;
}; /* HW DATA */

struct gdma_destory_pd_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

enum gdma_mr_type {
	/* Guest Virtual Address - MRs of this type allow access
	 * to memory mapped by PTEs associated with this MR using a virtual
	 * address that is set up in the MST
	 */
	GDMA_MR_TYPE_GVA = 2,
};

struct gdma_create_mr_params {
	u64 pd_handle;
	enum gdma_mr_type mr_type;
	union {
		struct {
			u64 dma_region_handle;
			u64 virtual_address;
			enum gdma_mr_access_flags access_flags;
		} gva;
	};
};

struct gdma_create_mr_request {
	struct gdma_req_hdr hdr;
	u64 pd_handle;
	enum gdma_mr_type mr_type;
	u32 reserved_1;

	union {
		struct {
			u64 dma_region_handle;
			u64 virtual_address;
			enum gdma_mr_access_flags access_flags;
		} gva;
	};
	u32 reserved_2;
}; /* HW DATA */

struct gdma_create_mr_response {
	struct gdma_resp_hdr hdr;
	u64 mr_handle;
	u32 lkey;
	u32 rkey;
}; /* HW DATA */

struct gdma_destroy_mr_request {
	struct gdma_req_hdr hdr;
	u64 mr_handle;
}; /* HW DATA */

struct gdma_destroy_mr_response {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

int mana_gd_verify_vf_version(struct pci_dev *pdev);

int mana_gd_register_device(struct gdma_dev *gd);
int mana_gd_deregister_device(struct gdma_dev *gd);

int mana_gd_post_work_request(struct gdma_queue *wq,
			      const struct gdma_wqe_request *wqe_req,
			      struct gdma_posted_wqe_info *wqe_info);

int mana_gd_post_and_ring(struct gdma_queue *queue,
			  const struct gdma_wqe_request *wqe,
			  struct gdma_posted_wqe_info *wqe_info);
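
/* Illustrative sketch (assumed usage, simplified; names ending in _example
 * are hypothetical): posting a single-SGE work request with a small inline
 * OOB and ringing the queue doorbell in one call.
 *
 *	struct gdma_posted_wqe_info wqe_info = {};
 *	struct gdma_sge sge = {
 *		.address = dma_addr_example,
 *		.mem_key = gd->gpa_mkey,
 *		.size = len_example,
 *	};
 *	struct gdma_wqe_request wqe_req = {
 *		.sgl = &sge,
 *		.num_sge = 1,
 *		.inline_oob_size = INLINE_OOB_SMALL_SIZE,
 *		.inline_oob_data = &oob_example,
 *	};
 *
 *	err = mana_gd_post_and_ring(wq, &wqe_req, &wqe_info);
 */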

int mana_gd_alloc_res_map(u32 res_avail, struct gdma_resource *r);
void mana_gd_free_res_map(struct gdma_resource *r);

void mana_gd_wq_ring_doorbell(struct gdma_context *gc,
			      struct gdma_queue *queue);

int mana_gd_alloc_memory(struct gdma_context *gc, unsigned int length,
			 struct gdma_mem_info *gmi);

void mana_gd_free_memory(struct gdma_mem_info *gmi);

int mana_gd_send_request(struct gdma_context *gc, u32 req_len, const void *req,
			 u32 resp_len, void *resp);
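
/* Illustrative sketch (assumed usage, error handling abbreviated): a typical
 * request/response exchange over the HWC combines mana_gd_init_req_hdr() with
 * mana_gd_send_request(), e.g. querying device limits:
 *
 *	struct gdma_query_max_resources_resp resp = {};
 *	struct gdma_general_req req = {};
 *	int err;
 *
 *	mana_gd_init_req_hdr(&req.hdr, GDMA_QUERY_MAX_RESOURCES,
 *			     sizeof(req), sizeof(resp));
 *
 *	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
 *	if (err || resp.hdr.status)
 *		return err ? err : -EPROTO;
 */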

int mana_gd_destroy_dma_region(struct gdma_context *gc, u64 dma_region_handle);

#endif /* _GDMA_H */