/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/* Copyright (c) 2021, Microsoft Corporation. */

#ifndef _GDMA_H
#define _GDMA_H

#include <linux/dma-mapping.h>
#include <linux/netdevice.h>

#include "shm_channel.h"

#define GDMA_STATUS_MORE_ENTRIES	0x00000105

/* Structures labeled with "HW DATA" are exchanged with the hardware. All of
 * them are naturally aligned and hence don't need __packed.
 */

enum gdma_request_type {
	GDMA_VERIFY_VF_DRIVER_VERSION	= 1,
	GDMA_QUERY_MAX_RESOURCES	= 2,
	GDMA_LIST_DEVICES		= 3,
	GDMA_REGISTER_DEVICE		= 4,
	GDMA_DEREGISTER_DEVICE		= 5,
	GDMA_GENERATE_TEST_EQE		= 10,
	GDMA_CREATE_QUEUE		= 12,
	GDMA_DISABLE_QUEUE		= 13,
	GDMA_ALLOCATE_RESOURCE_RANGE	= 22,
	GDMA_DESTROY_RESOURCE_RANGE	= 24,
	GDMA_CREATE_DMA_REGION		= 25,
	GDMA_DMA_REGION_ADD_PAGES	= 26,
	GDMA_DESTROY_DMA_REGION		= 27,
	GDMA_CREATE_PD			= 29,
	GDMA_DESTROY_PD			= 30,
	GDMA_CREATE_MR			= 31,
	GDMA_DESTROY_MR			= 32,
	GDMA_QUERY_HWC_TIMEOUT		= 84, /* 0x54 */
};

#define GDMA_RESOURCE_DOORBELL_PAGE	27

enum gdma_queue_type {
	GDMA_INVALID_QUEUE,
	GDMA_SQ,
	GDMA_RQ,
	GDMA_CQ,
	GDMA_EQ,
};

enum gdma_work_request_flags {
	GDMA_WR_NONE		= 0,
	GDMA_WR_OOB_IN_SGL	= BIT(0),
	GDMA_WR_PAD_BY_SGE0	= BIT(1),
};

enum gdma_eqe_type {
	GDMA_EQE_COMPLETION		= 3,
	GDMA_EQE_TEST_EVENT		= 64,
	GDMA_EQE_HWC_INIT_EQ_ID_DB	= 129,
	GDMA_EQE_HWC_INIT_DATA		= 130,
	GDMA_EQE_HWC_INIT_DONE		= 131,
	GDMA_EQE_HWC_SOC_RECONFIG	= 132,
	GDMA_EQE_HWC_SOC_RECONFIG_DATA	= 133,
	GDMA_EQE_RNIC_QP_FATAL		= 176,
};

enum {
	GDMA_DEVICE_NONE	= 0,
	GDMA_DEVICE_HWC		= 1,
	GDMA_DEVICE_MANA	= 2,
	GDMA_DEVICE_MANA_IB	= 3,
};

struct gdma_resource {
	/* Protect the bitmap */
	spinlock_t lock;

	/* The bitmap size in bits. */
	u32 size;

	/* The bitmap tracks the resources. */
	unsigned long *map;
};

union gdma_doorbell_entry {
	u64 as_uint64;

	struct {
		u64 id		: 24;
		u64 reserved	: 8;
		u64 tail_ptr	: 31;
		u64 arm		: 1;
	} cq;

	struct {
		u64 id		: 24;
		u64 wqe_cnt	: 8;
		u64 tail_ptr	: 32;
	} rq;

	struct {
		u64 id		: 24;
		u64 reserved	: 8;
		u64 tail_ptr	: 32;
	} sq;

	struct {
		u64 id		: 16;
		u64 reserved	: 16;
		u64 tail_ptr	: 31;
		u64 arm		: 1;
	} eq;
}; /* HW DATA */
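
/* Illustrative sketch (not part of the driver API): a CQ doorbell value
 * composed from the union above.  What matters here is the field packing;
 * the tail units and the register offset within the doorbell page are
 * device specific, so cq_id, cq_tail, db_page and cq_db_offset below are
 * hypothetical placeholders.
 *
 *	union gdma_doorbell_entry e = { .as_uint64 = 0 };
 *
 *	e.cq.id       = cq_id;
 *	e.cq.tail_ptr = cq_tail;
 *	e.cq.arm      = 1;	// ask for an interrupt on the next CQE
 *	writeq(e.as_uint64, db_page + cq_db_offset);
 */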

struct gdma_msg_hdr {
	u32 hdr_type;
	u32 msg_type;
	u16 msg_version;
	u16 hwc_msg_id;
	u32 msg_size;
}; /* HW DATA */

struct gdma_dev_id {
	union {
		struct {
			u16 type;
			u16 instance;
		};

		u32 as_uint32;
	};
}; /* HW DATA */

struct gdma_req_hdr {
	struct gdma_msg_hdr req;
	struct gdma_msg_hdr resp; /* The expected response */
	struct gdma_dev_id dev_id;
	u32 activity_id;
}; /* HW DATA */

struct gdma_resp_hdr {
	struct gdma_msg_hdr response;
	struct gdma_dev_id dev_id;
	u32 activity_id;
	u32 status;
	u32 reserved;
}; /* HW DATA */

struct gdma_general_req {
	struct gdma_req_hdr hdr;
}; /* HW DATA */

#define GDMA_MESSAGE_V1 1
#define GDMA_MESSAGE_V2 2
#define GDMA_MESSAGE_V3 3

struct gdma_general_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

#define GDMA_STANDARD_HEADER_TYPE 0

static inline void mana_gd_init_req_hdr(struct gdma_req_hdr *hdr, u32 code,
					u32 req_size, u32 resp_size)
{
	hdr->req.hdr_type = GDMA_STANDARD_HEADER_TYPE;
	hdr->req.msg_type = code;
	hdr->req.msg_version = GDMA_MESSAGE_V1;
	hdr->req.msg_size = req_size;

	hdr->resp.hdr_type = GDMA_STANDARD_HEADER_TYPE;
	hdr->resp.msg_type = code;
	hdr->resp.msg_version = GDMA_MESSAGE_V1;
	hdr->resp.msg_size = resp_size;
}
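
/* Example (sketch): preparing the headers for a GDMA_QUERY_MAX_RESOURCES
 * exchange.  The helper fills both the request header and the expected
 * response header with the same message code; the message is then sent
 * over the HWC, e.g. with mana_gd_send_request() declared later in this
 * header.
 *
 *	struct gdma_general_req req = {};
 *	struct gdma_query_max_resources_resp resp = {};
 *
 *	mana_gd_init_req_hdr(&req.hdr, GDMA_QUERY_MAX_RESOURCES,
 *			     sizeof(req), sizeof(resp));
 */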

/* The 16-byte struct is part of the GDMA work queue entry (WQE). */
struct gdma_sge {
	u64 address;
	u32 mem_key;
	u32 size;
}; /* HW DATA */

struct gdma_wqe_request {
	struct gdma_sge *sgl;
	u32 num_sge;

	u32 inline_oob_size;
	const void *inline_oob_data;

	u32 flags;
	u32 client_data_unit;
};

enum gdma_page_type {
	GDMA_PAGE_TYPE_4K,
};

#define GDMA_INVALID_DMA_REGION 0

struct gdma_mem_info {
	struct device *dev;

	dma_addr_t dma_handle;
	void *virt_addr;
	u64 length;

	/* Allocated by the PF driver */
	u64 dma_region_handle;
};

#define REGISTER_ATB_MST_MKEY_LOWER_SIZE 8

struct gdma_dev {
	struct gdma_context *gdma_context;

	struct gdma_dev_id dev_id;

	u32 pdid;
	u32 doorbell;
	u32 gpa_mkey;

	/* GDMA driver specific pointer */
	void *driver_data;

	struct auxiliary_device *adev;
};

/* MANA_PAGE_SIZE is the DMA unit */
#define MANA_PAGE_SHIFT 12
#define MANA_PAGE_SIZE BIT(MANA_PAGE_SHIFT)
#define MANA_PAGE_ALIGN(x) ALIGN((x), MANA_PAGE_SIZE)
#define MANA_PAGE_ALIGNED(addr) IS_ALIGNED((unsigned long)(addr), MANA_PAGE_SIZE)
#define MANA_PFN(a) ((a) >> MANA_PAGE_SHIFT)

/* Required by HW */
#define MANA_MIN_QSIZE MANA_PAGE_SIZE

#define GDMA_CQE_SIZE 64
#define GDMA_EQE_SIZE 16
#define GDMA_MAX_SQE_SIZE 512
#define GDMA_MAX_RQE_SIZE 256

#define GDMA_COMP_DATA_SIZE 0x3C

#define GDMA_EVENT_DATA_SIZE 0xC

/* The WQE size must be a multiple of the Basic Unit, which is 32 bytes. */
#define GDMA_WQE_BU_SIZE 32
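
/* Example (sketch): queue buffers are sized in MANA_PAGE_SIZE units and
 * work-queue entries in 32-byte Basic Units, so requested sizes are
 * typically rounded up with the helpers above.  raw_q_size and wqe_bytes
 * are hypothetical inputs used only for illustration.
 *
 *	u32 q_size = MANA_PAGE_ALIGN(raw_q_size);	// DMA buffer size
 *	u32 wqe_bu = ALIGN(wqe_bytes, GDMA_WQE_BU_SIZE) / GDMA_WQE_BU_SIZE;
 */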

#define INVALID_PDID		UINT_MAX
#define INVALID_DOORBELL	UINT_MAX
#define INVALID_MEM_KEY		UINT_MAX
#define INVALID_QUEUE_ID	UINT_MAX
#define INVALID_PCI_MSIX_INDEX	UINT_MAX

struct gdma_comp {
	u32 cqe_data[GDMA_COMP_DATA_SIZE / 4];
	u32 wq_num;
	bool is_sq;
};

struct gdma_event {
	u32 details[GDMA_EVENT_DATA_SIZE / 4];
	u8 type;
};

struct gdma_queue;

struct mana_eq {
	struct gdma_queue *eq;
};

typedef void gdma_eq_callback(void *context, struct gdma_queue *q,
			      struct gdma_event *e);

typedef void gdma_cq_callback(void *context, struct gdma_queue *q);

/* The 'head' is the producer index. For SQ/RQ, when the driver posts a WQE
 * (Note: the WQE size must be a multiple of the 32-byte Basic Unit), the
 * driver increases the 'head' in BUs rather than in bytes, and notifies
 * the HW of the updated head. For EQ/CQ, the driver uses the 'head' to track
 * the HW head, and increases the 'head' by 1 for every processed EQE/CQE.
 *
 * The 'tail' is the consumer index for SQ/RQ. After the CQE of the SQ/RQ is
 * processed, the driver increases the 'tail' to indicate that WQEs have
 * been consumed by the HW, so the driver can post new WQEs into the SQ/RQ.
 *
 * The driver doesn't use the 'tail' for EQ/CQ, because the driver ensures
 * that the EQ/CQ is big enough so they can't overflow, and the driver uses
 * the owner bits mechanism to detect if the queue has become empty.
 */
struct gdma_queue {
	struct gdma_dev *gdma_dev;

	enum gdma_queue_type type;
	u32 id;

	struct gdma_mem_info mem_info;

	void *queue_mem_ptr;
	u32 queue_size;

	bool monitor_avl_buf;

	u32 head;
	u32 tail;
	struct list_head entry;

	/* Extra fields specific to EQ/CQ. */
	union {
		struct {
			bool disable_needed;

			gdma_eq_callback *callback;
			void *context;

			unsigned int msix_index;

			u32 log2_throttle_limit;
		} eq;

		struct {
			gdma_cq_callback *callback;
			void *context;

			struct gdma_queue *parent; /* For CQ/EQ relationship */
		} cq;
	};
};
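
/* Illustrative sketch of the head/tail convention described above: for an
 * SQ/RQ both indices count 32-byte Basic Units, so the room left for new
 * WQEs can be derived as below (compare with mana_gd_wq_avail_space()
 * declared later in this header).  The unsigned subtraction is safe
 * because 'head' only ever runs ahead of 'tail'.
 *
 *	u32 used  = (wq->head - wq->tail) * GDMA_WQE_BU_SIZE;
 *	u32 avail = wq->queue_size - used;	// bytes free for new WQEs
 */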

struct gdma_queue_spec {
	enum gdma_queue_type type;
	bool monitor_avl_buf;
	unsigned int queue_size;

	/* Extra fields specific to EQ/CQ. */
	union {
		struct {
			gdma_eq_callback *callback;
			void *context;

			unsigned long log2_throttle_limit;
			unsigned int msix_index;
		} eq;

		struct {
			gdma_cq_callback *callback;
			void *context;

			struct gdma_queue *parent_eq;
		} cq;
	};
};
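
/* Example (sketch): filling a spec for a MANA event queue.  EQ_SIZE,
 * LOG2_EQ_THROTTLE, my_eq_handler and my_ctx are hypothetical;
 * mana_gd_create_mana_eq() is declared later in this header.
 *
 *	struct gdma_queue_spec spec = {};
 *	struct gdma_queue *eq;
 *	int err;
 *
 *	spec.type = GDMA_EQ;
 *	spec.monitor_avl_buf = false;
 *	spec.queue_size = EQ_SIZE;
 *	spec.eq.callback = my_eq_handler;
 *	spec.eq.context = my_ctx;
 *	spec.eq.log2_throttle_limit = LOG2_EQ_THROTTLE;
 *	err = mana_gd_create_mana_eq(gd, &spec, &eq);
 */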

#define MANA_IRQ_NAME_SZ 32

struct gdma_irq_context {
	void (*handler)(void *arg);
	/* Protect the eq_list */
	spinlock_t lock;
	struct list_head eq_list;
	char name[MANA_IRQ_NAME_SZ];
};

struct gdma_context {
	struct device *dev;

	/* Per-vPort max number of queues */
	unsigned int max_num_queues;
	unsigned int max_num_msix;
	unsigned int num_msix_usable;
	struct gdma_irq_context *irq_contexts;

	/* L2 MTU */
	u16 adapter_mtu;

	/* This maps a CQ index to the queue structure. */
	unsigned int max_num_cqs;
	struct gdma_queue **cq_table;

	/* Protect eq_test_event and test_event_eq_id */
	struct mutex eq_test_event_mutex;
	struct completion eq_test_event;
	u32 test_event_eq_id;

	bool is_pf;
	phys_addr_t bar0_pa;
	void __iomem *bar0_va;
	void __iomem *shm_base;
	void __iomem *db_page_base;
	phys_addr_t phys_db_page_base;
	u32 db_page_size;
	int numa_node;
	/* Shared memory channel (used to bootstrap HWC) */
	struct shm_channel shm_channel;

	/* Hardware communication channel (HWC) */
	struct gdma_dev hwc;

	/* Azure network adapter */
	struct gdma_dev mana;

	/* Azure RDMA adapter */
	struct gdma_dev mana_ib;
};

static inline bool mana_gd_is_mana(struct gdma_dev *gd)
{
	return gd->dev_id.type == GDMA_DEVICE_MANA;
}

static inline bool mana_gd_is_hwc(struct gdma_dev *gd)
{
	return gd->dev_id.type == GDMA_DEVICE_HWC;
}

u8 *mana_gd_get_wqe_ptr(const struct gdma_queue *wq, u32 wqe_offset);
u32 mana_gd_wq_avail_space(struct gdma_queue *wq);

int mana_gd_test_eq(struct gdma_context *gc, struct gdma_queue *eq);

int mana_gd_create_hwc_queue(struct gdma_dev *gd,
			     const struct gdma_queue_spec *spec,
			     struct gdma_queue **queue_ptr);

int mana_gd_create_mana_eq(struct gdma_dev *gd,
			   const struct gdma_queue_spec *spec,
			   struct gdma_queue **queue_ptr);

int mana_gd_create_mana_wq_cq(struct gdma_dev *gd,
			      const struct gdma_queue_spec *spec,
			      struct gdma_queue **queue_ptr);

void mana_gd_destroy_queue(struct gdma_context *gc, struct gdma_queue *queue);

int mana_gd_poll_cq(struct gdma_queue *cq, struct gdma_comp *comp, int num_cqe);

void mana_gd_ring_cq(struct gdma_queue *cq, u8 arm_bit);
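
/* Example (sketch): draining completions from a CQ and re-arming it.
 * CQE_BATCH and handle_completion() are hypothetical, SET_ARM_BIT is
 * defined further below, and the sketch assumes mana_gd_poll_cq()
 * returns the number of completions filled into the array.
 *
 *	struct gdma_comp comps[CQE_BATCH];
 *	int i, n;
 *
 *	n = mana_gd_poll_cq(cq, comps, CQE_BATCH);
 *	for (i = 0; i < n; i++)
 *		handle_completion(&comps[i]);
 *	mana_gd_ring_cq(cq, SET_ARM_BIT);
 */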

struct gdma_wqe {
	u32 reserved	:24;
	u32 last_vbytes	:8;

	union {
		u32 flags;

		struct {
			u32 num_sge		:8;
			u32 inline_oob_size_div4:3;
			u32 client_oob_in_sgl	:1;
			u32 reserved1		:4;
			u32 client_data_unit	:14;
			u32 reserved2		:2;
		};
	};
}; /* HW DATA */

#define INLINE_OOB_SMALL_SIZE 8
#define INLINE_OOB_LARGE_SIZE 24

#define MAX_TX_WQE_SIZE 512
#define MAX_RX_WQE_SIZE 256

#define MAX_TX_WQE_SGL_ENTRIES	((GDMA_MAX_SQE_SIZE -			   \
			sizeof(struct gdma_sge) - INLINE_OOB_SMALL_SIZE) / \
			sizeof(struct gdma_sge))

#define MAX_RX_WQE_SGL_ENTRIES	((GDMA_MAX_RQE_SIZE -			   \
			sizeof(struct gdma_sge)) / sizeof(struct gdma_sge))
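
/* Illustrative sketch of a transmit WQE's on-queue footprint, assuming the
 * small inline OOB area is used: an 8-byte gdma_wqe header, the inline OOB
 * and the scatter/gather list, rounded up to 32-byte Basic Units.  num_sge
 * is a hypothetical input.
 *
 *	u32 sgl_bytes = num_sge * sizeof(struct gdma_sge);
 *	u32 wqe_bytes = sizeof(struct gdma_wqe) +
 *			INLINE_OOB_SMALL_SIZE + sgl_bytes;
 *	u32 wqe_bu    = ALIGN(wqe_bytes, GDMA_WQE_BU_SIZE) / GDMA_WQE_BU_SIZE;
 */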

struct gdma_cqe {
	u32 cqe_data[GDMA_COMP_DATA_SIZE / 4];

	union {
		u32 as_uint32;

		struct {
			u32 wq_num	: 24;
			u32 is_sq	: 1;
			u32 reserved	: 4;
			u32 owner_bits	: 3;
		};
	} cqe_info;
}; /* HW DATA */

#define GDMA_CQE_OWNER_BITS 3

#define GDMA_CQE_OWNER_MASK ((1 << GDMA_CQE_OWNER_BITS) - 1)
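
/* Illustrative sketch of the owner-bits mechanism mentioned above struct
 * gdma_queue: the 3 owner bits of a CQE advance every time the HW wraps
 * around the queue, so a fresh entry can be told from a stale one by
 * comparing them with the value expected on the current pass.  num_cqe is
 * the number of entries in the queue; the comparison below is a sketch of
 * the convention, not a verbatim copy of the driver code.
 *
 *	u32 expected = (cq->head / num_cqe) & GDMA_CQE_OWNER_MASK;
 *	u32 stale    = (expected - 1) & GDMA_CQE_OWNER_MASK;
 *	u32 owner    = cqe->cqe_info.owner_bits;
 *
 *	if (owner == stale)
 *		return 0;	// no new completion yet
 *	if (owner != expected)
 *		return -1;	// overrun: CQEs were overwritten
 */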

#define SET_ARM_BIT 1

#define GDMA_EQE_OWNER_BITS 3

union gdma_eqe_info {
	u32 as_uint32;

	struct {
		u32 type	: 8;
		u32 reserved1	: 8;
		u32 client_id	: 2;
		u32 reserved2	: 11;
		u32 owner_bits	: 3;
	};
}; /* HW DATA */

#define GDMA_EQE_OWNER_MASK ((1 << GDMA_EQE_OWNER_BITS) - 1)
#define INITIALIZED_OWNER_BIT(log2_num_entries) (1UL << (log2_num_entries))

struct gdma_eqe {
	u32 details[GDMA_EVENT_DATA_SIZE / 4];
	u32 eqe_info;
}; /* HW DATA */

#define GDMA_REG_DB_PAGE_OFFSET	8
#define GDMA_REG_DB_PAGE_SIZE	0x10
#define GDMA_REG_SHM_OFFSET	0x18

#define GDMA_PF_REG_DB_PAGE_SIZE	0xD0
#define GDMA_PF_REG_DB_PAGE_OFF		0xC8
#define GDMA_PF_REG_SHM_OFF		0x70

#define GDMA_SRIOV_REG_CFG_BASE_OFF	0x108

#define MANA_PF_DEVICE_ID 0x00B9
#define MANA_VF_DEVICE_ID 0x00BA

struct gdma_posted_wqe_info {
	u32 wqe_size_in_bu;
};

/* GDMA_GENERATE_TEST_EQE */
struct gdma_generate_test_event_req {
	struct gdma_req_hdr hdr;
	u32 queue_index;
}; /* HW DATA */

/* GDMA_VERIFY_VF_DRIVER_VERSION */
enum {
	GDMA_PROTOCOL_V1 = 1,
	GDMA_PROTOCOL_FIRST = GDMA_PROTOCOL_V1,
	GDMA_PROTOCOL_LAST = GDMA_PROTOCOL_V1,
};

#define GDMA_DRV_CAP_FLAG_1_EQ_SHARING_MULTI_VPORT BIT(0)

/* Advertise to the NIC firmware: the NAPI work_done variable race is fixed,
 * so the driver is able to reliably support features like busy_poll.
 */
#define GDMA_DRV_CAP_FLAG_1_NAPI_WKDONE_FIX BIT(2)
#define GDMA_DRV_CAP_FLAG_1_HWC_TIMEOUT_RECONFIG BIT(3)
#define GDMA_DRV_CAP_FLAG_1_VARIABLE_INDIRECTION_TABLE_SUPPORT BIT(5)

/* Driver can handle holes (zeros) in the device list */
#define GDMA_DRV_CAP_FLAG_1_DEV_LIST_HOLES_SUP BIT(11)

#define GDMA_DRV_CAP_FLAGS1 \
	(GDMA_DRV_CAP_FLAG_1_EQ_SHARING_MULTI_VPORT | \
	 GDMA_DRV_CAP_FLAG_1_NAPI_WKDONE_FIX | \
	 GDMA_DRV_CAP_FLAG_1_HWC_TIMEOUT_RECONFIG | \
	 GDMA_DRV_CAP_FLAG_1_VARIABLE_INDIRECTION_TABLE_SUPPORT | \
	 GDMA_DRV_CAP_FLAG_1_DEV_LIST_HOLES_SUP)

#define GDMA_DRV_CAP_FLAGS2 0

#define GDMA_DRV_CAP_FLAGS3 0

#define GDMA_DRV_CAP_FLAGS4 0

struct gdma_verify_ver_req {
	struct gdma_req_hdr hdr;

	/* Mandatory fields required for protocol establishment */
	u64 protocol_ver_min;
	u64 protocol_ver_max;

	/* Gdma Driver Capability Flags */
	u64 gd_drv_cap_flags1;
	u64 gd_drv_cap_flags2;
	u64 gd_drv_cap_flags3;
	u64 gd_drv_cap_flags4;

	/* Advisory fields */
	u64 drv_ver;
	u32 os_type; /* Linux = 0x10; Windows = 0x20; Other = 0x30 */
	u32 reserved;
	u32 os_ver_major;
	u32 os_ver_minor;
	u32 os_ver_build;
	u32 os_ver_platform;
	u64 reserved_2;
	u8 os_ver_str1[128];
	u8 os_ver_str2[128];
	u8 os_ver_str3[128];
	u8 os_ver_str4[128];
}; /* HW DATA */

struct gdma_verify_ver_resp {
	struct gdma_resp_hdr hdr;
	u64 gdma_protocol_ver;
	u64 pf_cap_flags1;
	u64 pf_cap_flags2;
	u64 pf_cap_flags3;
	u64 pf_cap_flags4;
}; /* HW DATA */

/* GDMA_QUERY_MAX_RESOURCES */
struct gdma_query_max_resources_resp {
	struct gdma_resp_hdr hdr;
	u32 status;
	u32 max_sq;
	u32 max_rq;
	u32 max_cq;
	u32 max_eq;
	u32 max_db;
	u32 max_mst;
	u32 max_cq_mod_ctx;
	u32 max_mod_cq;
	u32 max_msix;
}; /* HW DATA */

/* GDMA_LIST_DEVICES */
#define GDMA_DEV_LIST_SIZE 64
struct gdma_list_devices_resp {
	struct gdma_resp_hdr hdr;
	u32 num_of_devs;
	u32 reserved;
	struct gdma_dev_id devs[GDMA_DEV_LIST_SIZE];
}; /* HW DATA */

/* GDMA_REGISTER_DEVICE */
struct gdma_register_device_resp {
	struct gdma_resp_hdr hdr;
	u32 pdid;
	u32 gpa_mkey;
	u32 db_id;
}; /* HW DATA */

struct gdma_allocate_resource_range_req {
	struct gdma_req_hdr hdr;
	u32 resource_type;
	u32 num_resources;
	u32 alignment;
	u32 allocated_resources;
};

struct gdma_allocate_resource_range_resp {
	struct gdma_resp_hdr hdr;
	u32 allocated_resources;
};

struct gdma_destroy_resource_range_req {
	struct gdma_req_hdr hdr;
	u32 resource_type;
	u32 num_resources;
	u32 allocated_resources;
};

/* GDMA_CREATE_QUEUE */
struct gdma_create_queue_req {
	struct gdma_req_hdr hdr;
	u32 type;
	u32 reserved1;
	u32 pdid;
	u32 doorbell_id;
	u64 gdma_region;
	u32 reserved2;
	u32 queue_size;
	u32 log2_throttle_limit;
	u32 eq_pci_msix_index;
	u32 cq_mod_ctx_id;
	u32 cq_parent_eq_id;
	u8 rq_drop_on_overrun;
	u8 rq_err_on_wqe_overflow;
	u8 rq_chain_rec_wqes;
	u8 sq_hw_db;
	u32 reserved3;
}; /* HW DATA */

struct gdma_create_queue_resp {
	struct gdma_resp_hdr hdr;
	u32 queue_index;
}; /* HW DATA */

/* GDMA_DISABLE_QUEUE */
struct gdma_disable_queue_req {
	struct gdma_req_hdr hdr;
	u32 type;
	u32 queue_index;
	u32 alloc_res_id_on_creation;
}; /* HW DATA */

/* GDMA_QUERY_HWC_TIMEOUT */
struct gdma_query_hwc_timeout_req {
	struct gdma_req_hdr hdr;
	u32 timeout_ms;
	u32 reserved;
};

struct gdma_query_hwc_timeout_resp {
	struct gdma_resp_hdr hdr;
	u32 timeout_ms;
	u32 reserved;
};

enum atb_page_size {
	ATB_PAGE_SIZE_4K,
	ATB_PAGE_SIZE_8K,
	ATB_PAGE_SIZE_16K,
	ATB_PAGE_SIZE_32K,
	ATB_PAGE_SIZE_64K,
	ATB_PAGE_SIZE_128K,
	ATB_PAGE_SIZE_256K,
	ATB_PAGE_SIZE_512K,
	ATB_PAGE_SIZE_1M,
	ATB_PAGE_SIZE_2M,
	ATB_PAGE_SIZE_MAX,
};

enum gdma_mr_access_flags {
	GDMA_ACCESS_FLAG_LOCAL_READ = BIT_ULL(0),
	GDMA_ACCESS_FLAG_LOCAL_WRITE = BIT_ULL(1),
	GDMA_ACCESS_FLAG_REMOTE_READ = BIT_ULL(2),
	GDMA_ACCESS_FLAG_REMOTE_WRITE = BIT_ULL(3),
	GDMA_ACCESS_FLAG_REMOTE_ATOMIC = BIT_ULL(4),
};

/* GDMA_CREATE_DMA_REGION */
struct gdma_create_dma_region_req {
	struct gdma_req_hdr hdr;

	/* The total size of the DMA region */
	u64 length;

	/* The offset in the first page */
	u32 offset_in_page;

	/* enum gdma_page_type */
	u32 gdma_page_type;

	/* The total number of pages */
	u32 page_count;

	/* If page_addr_list_len is smaller than page_count,
	 * the remaining page addresses will be added via the
	 * message GDMA_DMA_REGION_ADD_PAGES.
	 */
	u32 page_addr_list_len;
	u64 page_addr_list[];
}; /* HW DATA */

struct gdma_create_dma_region_resp {
	struct gdma_resp_hdr hdr;
	u64 dma_region_handle;
}; /* HW DATA */

/* GDMA_DMA_REGION_ADD_PAGES */
struct gdma_dma_region_add_pages_req {
	struct gdma_req_hdr hdr;

	u64 dma_region_handle;

	u32 page_addr_list_len;
	u32 reserved3;

	u64 page_addr_list[];
}; /* HW DATA */
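
/* Example (sketch): sizing a GDMA_CREATE_DMA_REGION request.  The request
 * ends in a flexible array of page addresses; when not all page_count
 * addresses fit into one HWC message, the remainder is supplied through
 * GDMA_DMA_REGION_ADD_PAGES requests that refer to the handle returned in
 * gdma_create_dma_region_resp.  num_pages_in_req is hypothetical.
 *
 *	u32 req_size = struct_size(req, page_addr_list, num_pages_in_req);
 *
 *	req = kzalloc(req_size, GFP_KERNEL);
 *	mana_gd_init_req_hdr(&req->hdr, GDMA_CREATE_DMA_REGION, req_size,
 *			     sizeof(resp));
 *	req->page_count = page_count;
 *	req->page_addr_list_len = num_pages_in_req;
 */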

/* GDMA_DESTROY_DMA_REGION */
struct gdma_destroy_dma_region_req {
	struct gdma_req_hdr hdr;

	u64 dma_region_handle;
}; /* HW DATA */

enum gdma_pd_flags {
	GDMA_PD_FLAG_INVALID = 0,
};

struct gdma_create_pd_req {
	struct gdma_req_hdr hdr;
	enum gdma_pd_flags flags;
	u32 reserved;
};/* HW DATA */

struct gdma_create_pd_resp {
	struct gdma_resp_hdr hdr;
	u64 pd_handle;
	u32 pd_id;
	u32 reserved;
};/* HW DATA */

struct gdma_destroy_pd_req {
	struct gdma_req_hdr hdr;
	u64 pd_handle;
};/* HW DATA */
struct gdma_destroy_pd_resp {
	struct gdma_resp_hdr hdr;
};/* HW DATA */

enum gdma_mr_type {
	/* Guest Virtual Address - MRs of this type allow access
	 * to memory mapped by PTEs associated with this MR using a virtual
	 * address that is set up in the MST
	 */
	GDMA_MR_TYPE_GVA = 2,
};

struct gdma_create_mr_params {
	u64 pd_handle;
	enum gdma_mr_type mr_type;
	union {
		struct {
			u64 dma_region_handle;
			u64 virtual_address;
			enum gdma_mr_access_flags access_flags;
		} gva;
	};
};

struct gdma_create_mr_request {
	struct gdma_req_hdr hdr;
	u64 pd_handle;
	enum gdma_mr_type mr_type;
	u32 reserved_1;

	union {
		struct {
			u64 dma_region_handle;
			u64 virtual_address;
			enum gdma_mr_access_flags access_flags;
		} gva;

	};
	u32 reserved_2;
};/* HW DATA */

struct gdma_create_mr_response {
	struct gdma_resp_hdr hdr;
	u64 mr_handle;
	u32 lkey;
	u32 rkey;
};/* HW DATA */

struct gdma_destroy_mr_request {
	struct gdma_req_hdr hdr;
	u64 mr_handle;
};/* HW DATA */

struct gdma_destroy_mr_response {
	struct gdma_resp_hdr hdr;
};/* HW DATA */

int mana_gd_verify_vf_version(struct pci_dev *pdev);

int mana_gd_register_device(struct gdma_dev *gd);
int mana_gd_deregister_device(struct gdma_dev *gd);

int mana_gd_post_work_request(struct gdma_queue *wq,
			      const struct gdma_wqe_request *wqe_req,
			      struct gdma_posted_wqe_info *wqe_info);

int mana_gd_post_and_ring(struct gdma_queue *queue,
			  const struct gdma_wqe_request *wqe,
			  struct gdma_posted_wqe_info *wqe_info);

int mana_gd_alloc_res_map(u32 res_avail, struct gdma_resource *r);
void mana_gd_free_res_map(struct gdma_resource *r);

void mana_gd_wq_ring_doorbell(struct gdma_context *gc,
			      struct gdma_queue *queue);

int mana_gd_alloc_memory(struct gdma_context *gc, unsigned int length,
			 struct gdma_mem_info *gmi);

void mana_gd_free_memory(struct gdma_mem_info *gmi);

int mana_gd_send_request(struct gdma_context *gc, u32 req_len, const void *req,
			 u32 resp_len, void *resp);

int mana_gd_destroy_dma_region(struct gdma_context *gc, u64 dma_region_handle);
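
/* Example (sketch): a typical HWC request/response exchange built from the
 * helpers above, using GDMA_QUERY_HWC_TIMEOUT as an arbitrary message.
 * timeout_ms is a hypothetical input and error handling is abbreviated;
 * resp.hdr.status carries the device-defined completion status.
 *
 *	struct gdma_query_hwc_timeout_req req = {};
 *	struct gdma_query_hwc_timeout_resp resp = {};
 *	int err;
 *
 *	mana_gd_init_req_hdr(&req.hdr, GDMA_QUERY_HWC_TIMEOUT,
 *			     sizeof(req), sizeof(resp));
 *	req.timeout_ms = timeout_ms;
 *
 *	err = mana_gd_send_request(gc, sizeof(req), &req,
 *				   sizeof(resp), &resp);
 *	if (err || resp.hdr.status)
 *		return err ? err : -EPROTO;
 */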

#endif /* _GDMA_H */